~/f/dealii/RPMS.2017 ~/f/dealii ~/f/dealii RPMS.2017/deal_II-openmpi4-devel-9.5.1-0.0.x86_64.rpm RPMS/deal_II-openmpi4-devel-9.5.1-0.0.x86_64.rpm differ: byte 225, line 1 Comparing deal_II-openmpi4-devel-9.5.1-0.0.x86_64.rpm to deal_II-openmpi4-devel-9.5.1-0.0.x86_64.rpm comparing the rpm tags of deal_II-openmpi4-devel --- old-rpm-tags +++ new-rpm-tags @@ -10125 +10125 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/DEALGlossary.html c2afb605a421679a4ea841545cf2b286f52cf778e5df341083824ab0635eff1c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/DEALGlossary.html 8a49ef8a7b956cf763d9d503aaa9b36fa6026f1f0e67e66e007df420766a2939 2 @@ -10128,3 +10128,3 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/Tutorial.html 5981bc17594d634d5c998b9f9b3c307ffb0ca69ae02e2a31812b14438922439a 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/_formulas.tex 9702dd3bd33ef6bccab8cd94a1ac922dd9b4c71114694b4a8de6a3cc2eabe4ae 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/_formulas_dark.tex 91cce4ff6be8910fa07745c06e3263b2389705c297b233352fee71dd08b9b5ea 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/Tutorial.html 686d7c6e24f551f4b8fff6d0a54638fbf071428874fd7c3324c1130a29cc5acb 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/_formulas.tex c7fe65427ecfc7ee1966d36ea1c8059d746279efd33a0f8206afea768ab625d8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/_formulas_dark.tex f65073c8125709c360fd001c9140e61d1c0ea88742c38e53961edb3369cd03c8 2 @@ -10297 +10297 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_6_1_and_6_2.html 53db53bece7471547af09b61dc04e95a5a8f858bae382f23c4273b46971dd471 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_6_1_and_6_2.html 00a90296bfa47441d5053629d678ee0b434f3faa6a8020dea6d92373e0243364 2 @@ -10299 +10299 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_6_2_and_6_3.html 
016f674fa8b87a2a1dcf1f689a328a3f94909adbd12ee7309a21ba8150748732 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_6_2_and_6_3.html 1ac612f4b09451f76edb3712b790e31563d22664065135fe5ea2c6765fc0ae47 2 @@ -10302,2 +10302,2 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_7_0_and_7_1.html 6e6fdf60ed3398db378bb9e9a5dc9a5ff4eefb09e61df6d5c5b3fb23d5df5895 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_7_1_and_7_2.html ac9fb69e9f967304ae73754b996a2039fee7d656cf03c8934b1a2e7a82f61b83 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_7_0_and_7_1.html 0e00db3140984cb4e7a7b716a28808c871b696d59e41a35cc9f577e88934fdf6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_7_1_and_7_2.html f9c8bfbb7ecdce8d500cb68ac61d976b0a4b0addbda86d24076487b38a0020e4 2 @@ -10307 +10307 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_8_1_and_8_2.html c9df07a9ca91cfd7509a01fc0e48d00c999c54d0065c516736fd83f2daf32780 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_8_1_and_8_2.html 1cfdcfcc7acb5832de926339d2abd9ed6185751598127571455272c450371248 2 @@ -10313 +10313 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_8_4_2_and_8_5_0.html 460621cca93c4de4c69b1022d037a99079c24288780209499998d3f111774e3c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_8_4_2_and_8_5_0.html 621ece18a816c1a5f9eb8f625aa55df9fd071d579a5bd2163a444bcf4e566b4a 2 @@ -10318 +10318 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 2d01761a43d5f6f6370db8b35e4e12799402814c32aa124c0a9edeac9bf79be1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html d119b01f39c6ed6f23e08dd0df0179e6173280ebd1a89564a57337202bd10a4e 2 @@ -10345 +10345 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAffineConstraints.html 
4cb08f3b88fb7bf36e2082bf1f00a83e2e1f19f717e9bb027fd850e0fd6b190c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAffineConstraints.html 8a7756ec9070e7da12f20f034047bae68edf1c30b50c8aac6bee3f210baf36ff 2 @@ -10363 +10363 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 04c7944e0f1adb2f7a425a6de1c6c5e08aa79ff6718ed0eedc9401e5df63f72d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 576f3c8590db66b7951c9157b28bc152c22f7c87086a17f53d705d3b475adfde 2 @@ -10380 +10380 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAnisotropicPolynomials.html b9cf57f696cabb38fcf781c20371659b1e111d4c2d45840d0d4dff193edd48e2 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAnisotropicPolynomials.html ee6bc3f8eb57ee1235ef804f1f2e9d29c734d13760373512418d39d3d48116cf 2 @@ -10417 +10417 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classArpackSolver.html 199c65f6d699a1c2384ec6f3673b272e30c357430a90724ae9cebdd647972fe4 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classArpackSolver.html a4193a2b15e9a0012da2b0675e0e86daa26ec6218fdb103d1562ccbd9566ac71 2 @@ -10420 +10420 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classArrayView.html abd04d002a9a591a420e5d26d5626b6aaf650a08d06942da6dc938074ac2fe41 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classArrayView.html c4de72cee678cb9d3ee48d272a45bcf40c9f6310be3f0e4fc5e5644ca18a2c7b 2 @@ -10423 +10423 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAutoDerivativeFunction.html 868f29a981483d7a2425abc55d1a209724c3b259421b67df9a53e9f90bc4d6aa 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAutoDerivativeFunction.html 4f8671801c631714cc794d5ae33bf991b19cbc62b14b69feca4990d52bf48e2c 2 @@ -10426 +10426 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBarycentricPolynomial.html 
d07fce8002c5e787f0d2a8fa6532ae78c51a9b3fb25ce194c673014db37088a0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBarycentricPolynomial.html 145ec71a4c7985a829424d3e2f52b7b2b99be8f6a692b6ac382a1af33163e988 2 @@ -10433 +10433 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBaseQR.html 4b30d6425415d31be7fbda2a860dd37ed53b1670d20a728ae006c5b74e37c6ce 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBaseQR.html ce3c7b5c5ce5e4b12cc699aee1a590ea037aa63f93cfe075b03061379cd1245f 2 @@ -10439 +10439 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockIndices.html 7b4f5c7b7f016addc62e517453246685242899cdf1863164f31cb97eb4c5e612 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockIndices.html 28200009e6843167effa0d4b203bae1293164c5d36fb6cf1cd36780402c34a84 2 @@ -10445 +10445 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockLinearOperator.html d84d273d468970d1998aca93a18324c2c116c949af9bc9fe01ee4317b905366a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockLinearOperator.html da8bf0c014a01e67f480b681723923c4b7ae54dc85e70e22b4baf042a1821252 2 @@ -10450 +10450 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockMatrixBase.html 683a2aa7b8ef5beec4724ee3fa2292c138135c1dee6d729823bef64f96d78e7a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockMatrixBase.html f9a05f50c843e50e2986d7733dce5751116722b943e82a472f85dd9b96de8abd 2 @@ -10463 +10463 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockSparseMatrix.html 9293e59fe6cc7d23a4a7e5d4233d90b6fc7eee5150903c3aa4239c34f979cdc8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockSparseMatrix.html ad4699b3c60c05c508da0f3741d4cafb0683fb93c1784706517e37fb46a27902 2 @@ -10465 +10465 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockSparseMatrixEZ.html 1c4f0c1d792dc7e23d9ffd70f52a82efcb41191c84d8ac1380ef9c89257d609e 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockSparseMatrixEZ.html dfa19685c4c4492248087490089064e73bcb42fe97d62ec1aec53a4afa2139d9 2 @@ -10475 +10475 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockVector.html 66b49f8d156e45d7c4777ba63cb3901097c851d31c03843ce2f51606c3d5a24d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockVector.html 601eddd7558e32c9423da6643e5ff87029b2a24b43ae2c759f5a02847ca51e0f 2 @@ -10477 +10477 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockVectorBase.html 21e7b022c5826fde78edae3fc4c02e2e1874e03b8bdd2ac602e269986cd64082 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockVectorBase.html 4c620024fb28f9503d0cfb376be6409efac825ff1e5e41ceb83bdffe248e2b00 2 @@ -10481 +10481 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBoundingBox.html 82f6a021ea0e1dd6149a18ea7763ac1acec45f2fba9bf4df967345195ac5b087 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBoundingBox.html a500431ea5864821f19c79dc1696225dba1e732aa6eed7a25df3577c92955388 2 @@ -10494 +10494 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html 4e920f51099519b0de45507edd6d2de1abc98c121df87e77af51611f0d18617b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html df36a54aba46f922edab321b52484591c460135578ddee0516fea722550de82f 2 @@ -10496 +10496 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html aa7909afac2f195ebf899beae8c3c04b3155500ac5cce357b3de92d452f4d591 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html dec1d68d22e5bd0a41211b98ee3e0c7faef72f1c12047e274d5c3527555f0762 2 @@ -10500 +10500 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 90964c56c903b8ea337cf87b37a9d0e5e5a744347943a392dcecdd64159ddb47 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 9b8b63d3564be85403bd6cf49c6d059418d187a3b5053351fb4843428be01fe2 2 @@ -10503 +10503 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCellAccessor.html 9e89028ad79233a943d09629a53fea22d096f525d319b61a0ad3ec24277f857a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCellAccessor.html 5128adcc69e4e7ae842ccf254f2733d9beb95300f5c066d4652dcfeafa486fdb 2 @@ -10513 +10513 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChartManifold.html edb0750d3d8fa4c10c733cdb8c329251e51cdf857d0dbb1e8736ac89ff79a8ad 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChartManifold.html 653cc9c6be5ff839722b0eb4e9cda3bbe74bd46c35b06ab23f37bb3562d1454b 2 @@ -10516 +10516 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChunkSparseMatrix.html 6d8a614b44024b1d87323ed4b403f6d63976637ab4c0583b4010623777a1b6c7 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChunkSparseMatrix.html 7681d718593e81c198cbd1f410a8fbf9dc0f7f5570ffbce3ee0e6f4241bea164 2 @@ -10532 +10532 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChunkSparsityPattern.html da71d77b7403c2a1cc5badca022a20fbd30c17299208450216067c34873717bc 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChunkSparsityPattern.html c5b20eb72b39aea93f1213e5f1fa3500d6c71f57a334138c90397cfb47e53529 2 @@ -10540 +10540 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classComponentMask.html 7de0278211786c3a43c6d6df3f1c952737b6d9399107e89237655c964d095672 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classComponentMask.html bffda4c0c8738e78ad81cb1204a07087efab58a80cab794852b189ae79e8e027 2 @@ -10545 +10545 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCompositionManifold.html e93682095e741624cf7a9fc0f82199d1b321649e4029b96ee6b845dfc3a492bd 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCompositionManifold.html e20ab928ec5fcd9760daf4cb8f9254b53c8d9294dade014ef8a6bba3ed52e4a1 2 @@ -10556 +10556 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classConvergenceTable.html 3fac39689584e2070a6af5b1917335d4f385860fe785837757fa5e736604df48 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classConvergenceTable.html ee123c828755b69b2095c30320e41d12db293a1f5372961c1d275a69050be2ba 2 @@ -10559 +10559 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCylindricalManifold.html 73e541d3dc15af52e86928d731ab89b9af88c18e2b8645056d50bd0a12cc205c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCylindricalManifold.html 3968074e57a065b53e44f46bf13cfcb0a828373ad9f4daf632c5dcb88ae904f8 2 @@ -10588 +10588 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessor.html fd8bf2dd7a6fdd514c950dec7a1566701cbeb524cfbe5a236a5b5c899e498cde 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessor.html 7b02518e218fe72a9a324dee987e61db5c37e486840301d280bbd584f1a57385 2 @@ -10593 +10593 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessorTensor.html f1b2d5f8c2a20111f69b3b89a7e46d4c45d8dd289ee2dbc4ef3dffb9f92ce17e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessorTensor.html b359737a1c87ce772bbc03d117544c82292315eb2b45c77dd9f0c8874e8474fa 2 @@ -10596 +10596 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessorVector.html e3efeb7dd0598936441aa58ea010f9c1b7ca757d38fc0f510d4ea3cd587c27e2 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessorVector.html 60ddd00a69f136cf2da90b3ddbb26a149b5a5ced68f61ecc38d0e0c7591fa411 2 @@ -10613 +10613 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 
b36662d142b284a426f88405ed4f2d1a727df681e2967134c6e1ddc3f557fcda 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 2678621a0fc4dbaf9bb3110d25b33060e01227b7de1f74814fc67d885155ed6e 2 @@ -10615 +10615 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 0acda2652d2c745f5794259a906e71a21e3fa2acc1a0aaa5bad6352c97e0084f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 187970bb379bace7134777a00ce0803679c788056689b240419c2635d826ddb5 2 @@ -10617 +10617 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeForm.html fbcaa5ed54e97a81b53e72baddc129f1f62796764aeef8f6fdf474c3ae1402e3 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeForm.html 887cd5ccc8cc212fc19f11c85b7a38e59994c37eb6cf8a5338a986db6a9e8d3c 2 @@ -10623 +10623 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html b10e651b639c64e38b1800c38c8f2d1817b37fd832ba57fdbff17f1cb62d1703 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html 7423e0d0e71c85c600054be1f0358bf2b9b70439590f095794e74b981aa2229e 2 @@ -10626 +10626 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html dc9f517e681eaa9f272bb5e547311d013cb40f9af22885083e48743bd987b998 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html a93195f0887f9e87ac8966a9b7787a4502290129ae45216e79063cc5773418e4 2 @@ -10629 +10629 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html dece1a12df3d70012362de169a0b2eae01a9b50ceeb859d66da86178a4137516 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html 9f32b5d12b1331952ff7b108ec0c826f72eb4696885f9d190bb2b3906328299c 2 @@ -10632 +10632 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html 6e8562ead7025069fd10a9aba9f67b5378bcc984c825348e4229ae57848caa54 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html d07207991c3ed623a86aa467d881beab19196268e783baf06996261513f0344e 2 @@ -10635 +10635 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html aa57abb5dec5fec74b007d816b2607c7049004a5d3ed181f6a033ffbf1005b71 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html ef48435459521b39ed49039c9af355993617654c798f025661cfce7c5f49575b 2 @@ -10638 +10638 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html 5f829c0a09268b949ee8d4bf26b197910b53d7aa8a9ca7792daea08d1d44e46c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html 8b57d9ef39590df35e63563b906f9e418625e2fe3035919d89737e53ff8703f3 2 @@ -10641 +10641 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html 06baab8356e9e2e6f9a6bdfe38e1363aae0e4af8fc30bad8be494c24fa6d26a1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html 59d569dbe8f7fe913d56a045d7d8e79308d80460969ec50d626579431c563e60 2 @@ -10653 +10653 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDiscreteTime.html 556765d073a7a3088b56e0a32a248e026e92e07a136e9ed1db450931fbfb0d6e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDiscreteTime.html dd0ac70b9b53d0336dde080afcf12e96683e9fb0c7d373af7a820f010f038e98 2 @@ -10664 
+10664 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDoFHandler.html 592598dd0433b3af4e2439d575fbcd1ada399efc15a9cc3fc679eb47c86f93d1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDoFHandler.html e93c9a08d9bbede725fd467c90b2740a1a296145ec375d85e57f36da51219c2b 2 @@ -10675 +10675 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDynamicSparsityPattern.html 1f12cc568ab1ac70d576579b103cec3d5b96ae3db44105ea8ffc68d38f6c8b0d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDynamicSparsityPattern.html 7dd4c88fa846ea5ab831d2105a15ed6a27d2ecb5c97be9e29a6524e50ac0a59f 2 @@ -10682 +10682 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classEigenInverse.html d782a277da78defff483478e25dfbdb70257dc0a4aa37d59a056e72beb98288c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classEigenInverse.html c7708ecba7b7d44dd12f4244b562217bd47f89aece4fb080e7d769a0ebdd9422 2 @@ -10685 +10685 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classEigenPower.html 8950569edcb16b952c2c5a0e3480be121d1ee9e0a9b0e67f7495d589f1557ae6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classEigenPower.html 0a96debb61fe1512695bdc1b12bd6b24ed2892b99f4503e0c9a204a22bb5207c 2 @@ -10688 +10688 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classEllipticalManifold.html f5ee61cd6c0a770fe6934fb7c5ebe7957e53c4df1c26af9a5d1a34e6ce5b096f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classEllipticalManifold.html b6f0ce479a3ccd1d04a29477d2bd370858166f6634e3dc198517d4dd405133d0 2 @@ -10696 +10696 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluation.html a8220c1f61aac62d4e7dcff882738e84dc8e4cf27d5e7ddb5196bba5ee796a8a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluation.html d91098bc145eba1db5ad2a1cf7e1a1d37414435b5e50b626323851d7fbfb51bb 2 @@ -10698 +10698 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess.html 034566d4378889963268c3a272feb6277c6d89bec40b6c84fbf334e460c1bc28 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess.html a15a59c6ef8bbc64d5bc8874c3e27634f9c860faee83e574ac18e8c49eb4efba 2 @@ -10700 +10700 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html fb5565907dc49434315273df0a71244d3c36f217f6fe916367c6a75f35b0a1a3 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html ea7e927d6cf1f8ceacd9effb959eeea9281eb6c2e4c8e0641ce458cd85bcd848 2 @@ -10703 +10703 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 09810af439e86392254c6761d9da3d5329f98c46c4bbc5c086254e4879d7c88b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 753b41f267761964287a908b9f2acdb3012599eaa1e2d83274c038df1df3d922 2 @@ -10706 +10706 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 6254359e9660df8b335529d4f9ac30a15810208abfa9a9bb30dfdf3d262b239d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 52ac3df4ee610b121f105675d1302be73b31b25b73e49db440aca0d6337232b2 2 @@ -10710 +10710 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationBase.html 8b6579555a9c6f5f6f460de98adddea31bf18968981b56c008db487627ec5313 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationBase.html 
38dcf916b54259b266b4c59614a265fe8ffa2cbdf687ffa53b1ae8935ac6a294 2 @@ -10713 +10713 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationData.html beae7f98bb292f420e7bdeb2feab4050708f7772c0f6d5d9c83293a61bd7e642 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationData.html 0c639d687a2e056958dba7b4db96e1e5126450bf1528a5854f4f0ed3e29cc4a2 2 @@ -10717 +10717 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceEvaluation.html 686c4b7b7c6898bc54d66ca3c719ec35fbc4d5d6d4299a59b96530698dfa3240 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceEvaluation.html ee9f31289e8b9c8eb18fceb4f2c2627d19bf132506a8b9233f7d044b8f01487c 2 @@ -10720 +10720 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceValues.html 80d53e2ff6a5dd057ed532e857f201075b9580171aafda24818cb94b416ff443 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceValues.html 999a0a20bf13cf656f3e78b44ce6b2c063255ed6737b30db0cf245f37583252c 2 @@ -10722 +10722 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceValuesBase.html a99f966cd0a0be6440f2d480e32c1e4352da8385c375a357f558c079dbf362cd 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceValuesBase.html 2c21195fab00628db0f691071bc13c6eb458de00a3a651e0f0809a5e7fcceda9 2 @@ -10726 +10726 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceValues.html c6f4cf2a7b272c1547360822225b356440003f0d9e05a3fa8cce3895b4e13d08 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceValues.html 2c4929d7fe56bed3fdd68707a8a0ef1440bfc8ba63bfb46f1ace29639327d095 2 @@ -10732 +10732 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html ed4a016526d8adf5b0a7c6c458bf93e7f4d4624d4663eb635690431c02c9931b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 
beda6af5032121968ebe44ca53fcd927b924bab531221b465813ebb429dda07c 2 @@ -10735 +10735 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 27f3464be97f81e92e89cf04e30b4aa4ecfd3349356c0da78e68ce60687b806a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 5e13c29f3de8f7158a4a3ddf30242cd2ac8cdbe2171bbf9a0fb1c37b5a9fae73 2 @@ -10740 +10740 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESeries_1_1Fourier.html d37d060aab58084153fe0af72048c09b49dd5c29a230839c7bb1d7da5402beab 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESeries_1_1Fourier.html 668a522d9cabcf50ae8ec53b2117fb77a80c665fca32f51ee092c63f02654a76 2 @@ -10743 +10743 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESeries_1_1Legendre.html c200f223d6a9d25821aa84034adc9b0e8bd4002a8d318c3253b5f8a1e524448e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESeries_1_1Legendre.html f7eec706b4b6e19df99770d5a213d04c6fb3a623b159a30c7ec94a2ea14922ec 2 @@ -10746 +10746 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESubfaceValues.html a76b7524d8119146e02e3e99bba3c75b87e45f6b5009df6de3d83a6daaa8f506 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESubfaceValues.html dce30a2901a7d1e671acbc3e77dda3cf0492067efa2439dc190a033ecc744632 2 @@ -10749 +10749 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESystem.html 3385a95a5e3f2c2fb57bdbf132466ee9a7a51b1b9ba668105f7165b4c531c44e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESystem.html 9828f56926be0043fb157cab9741dc2b79a4d88087d2bc92615611e57477b218 2 @@ -10761 +10761 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValues.html eb45312e26d984b9a1ff9545bfb5199f3cfcb157d34448e8d19af088e6ad4187 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValues.html a81685a0984c7faa4dda046a9751143dbbef29b110b57752dfd13d4a3ba988e6 2 
@@ -10763 +10763 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesBase.html 7538ef082181106913997eff29e7f77c5c059a0022c2811e65a90559988844af 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesBase.html fce86f6a5677f77ac1db6193f28ad2041231ac85f61e592de5cf52ce4c23c9e2 2 @@ -10768 +10768 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 9525b1c5f303ea49728c5b31bce1028b84e390c1f6a52f832895f1f656fb760a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Scalar.html e3f47e33fe5c254fc6566f38560081d717dd4540ccb26bee4de717666ff69959 2 @@ -10772 +10772 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html ba3d3ba6fc7df0338c17633a24d3fa5b2d7ae6a5f04cdd4a57cb6393129e856d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 28279a2298af15e54bb88a8130ec592ecd9ba5bf9d6ac8ea403c440feaef5158 2 @@ -10775 +10775 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 49e0fcbbc896441d22bdb0edb56184f2929aae11c29b75316d87bcfc4cf918d6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html ee0c8c4b6712e3181eb206868f1ee3400b9b766772cf88fd5c573db529905a73 2 @@ -10777 +10777 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Vector.html 3a7eb0d1ab2bbe8dbed8dcafae6e45e2472abc38be60536c2565bb0443e94b3d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Vector.html 14ec23475abaac6e04a6d3bae520170d2b903a14d35aeced8dfc4e920e310525 2 @@ -10781 +10781 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__ABF.html 2da1707157e4d4fd876a7ee724e367f1917f419d5ad438a343020eeac6172d80 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__ABF.html 4161b072a29395dc207d9602dc3f869b52e1c6055a94fbbb5f34d4475ca7f94d 2 @@ -10787 +10787 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__BDM.html 99df2887ed8f784af3aa0e43e202e1c3dbd25de745a38bee4d3e9f2d6c6bd400 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__BDM.html 5c3dc5f7ef46e9e23043de34e09cfa3acc44f7c25469f3a72bc81669a2544086 2 @@ -10790 +10790 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__BernardiRaugel.html 39d5c43f22305f0a5422446b3281de768be8b813c1698c16058e220bb61019fd 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__BernardiRaugel.html d0b8c036c3fd93d6a9cb75a00026e238c923f45eeb9e79fd71e2f1506fa95b3d 2 @@ -10793 +10793 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Bernstein.html ba7aa63e8b9543ce20f67dc024465caf74d1191c5343b7ffeffa65219ed6f662 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Bernstein.html 9ef02cc657f69183a9a876496e4515721096b7de11764e60755854e1988a20df 2 @@ -10796 +10796 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGBDM.html ac8007649b15713626bf7e0d8b12568f7b0b9cfbdbc488a797a71a5275d2360c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGBDM.html 5a6e4ca11a87476d0e9b249d9476bad2e299bd6c2f2992b2b30d0c5c917aee50 2 @@ -10799 +10799 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGNedelec.html 102b9bfd2dce6b73ac370b966fe214b801a70cf72397b82748e84cabf5df1d33 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGNedelec.html 5ebd2ddd3345a1145504e1378507ccbe9a2251f1d1fce6c5fe69a54ef11f5ce4 2 @@ -10802 +10802 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGP.html 3ce175327a666c198149eb8b84775ae794c189a6b6f6b21a3b86c828c562dc3b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGP.html 
1a9d12d3f0e5b7fcca754577b0149ec33db6089b1d634826a2457d96d5f5c06a 2 @@ -10804 +10804 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGPMonomial.html 78816881680f03466aa43bf0adf1d52a9d5c3b8b28cc536b3b2d768daa4d07d1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGPMonomial.html 02bbd262c6e0c3067efde1b0273ea94d4a1a074477f07de151ffd2bb48c2ccae 2 @@ -10807 +10807 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGPNonparametric.html ff9ee231eb011e189f584e94bbc7178bbe6d8306f99e437ee6236024799a00cd 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGPNonparametric.html 59cf7f0c6c1383db63d098e8e5ff8f32cfb788df0a21bcfc60222c9ad0a0883f 2 @@ -10811 +10811 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQ.html 49ce1d5fe44dfe8f09cdb0df729e37965b3d8ea2cc3d0e98e4208380f9df9863 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQ.html 5cc9c60ce4bfd719c110e750fbe921257321b1ff5d6839e9a29d0476f4076607 2 @@ -10813 +10813 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQArbitraryNodes.html 6111b34936ecb7e6a6621e7fb7b6253f88f0a246d3f48e324061aa160bc08ea1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQArbitraryNodes.html 7a5b7bdd42c6dbe36a8158231dbf03086b4504653de945a79393de54cf86041e 2 @@ -10816 +10816 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQHermite.html 6b3db8923a5c0b15215960ecf36daac0fe2b842bbe37ad58de73860676d6683e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQHermite.html 1dd2713c32bdc1167008b3aa8f854e79ec9bc5b43082d3aec3fbfee15bb296e9 2 @@ -10819 +10819 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQLegendre.html 5dcfc6fe87daf5fde0b2f08d1906d007823974995c2762be4fbc8c5c82321153 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQLegendre.html 54295453722561f5deb3b1514285bd31686068c9dcdf2c9a70dff56164236799 2 @@ 
-10823 +10823 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGRaviartThomas.html 40136a68302e003a39e79e315cf49dd00b8c4523691c45d0b2bec3b76a704347 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGRaviartThomas.html beab997d261aadcead72786e4c45ebae885d529755fbfe777c41ba84277220e5 2 @@ -10826 +10826 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGVector.html c4b67787d8e00ccf2a89b87adf6237fe50afdb6b617163b1c1be2ffbb7d123d9 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGVector.html d17be38370de96690ac1b47f666b5191696bca909f32846bf040b49176cd3751 2 @@ -10832 +10832 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Enriched.html 456ae5496a0e92811423fb4204d53efbc87a9fa3f3f18aa5cd3dc712724b02bc 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Enriched.html 1d2470d74cf488671984244157b58fc70f1fe8c428ebdf94bdac3e3f42bf478e 2 @@ -10838 +10838 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceP.html 3a8e65c1306a34ce2bafff35d11965cc4f7110dff1808d552a48aef27e679c3b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceP.html f8ce6814d8093aacb2b15e33a03396cf31491c0795006d560037173ae3e54e48 2 @@ -10840 +10840 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html d8f143eb8c44f077986011efb21dc9ba9af409d9e060ba1d6381e3e1a39e9f04 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html b8280ba17ccf9e97b540835619b83dbcc2f3faec5dce3e6fc92c2bf339809ab2 2 @@ -10844 +10844 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceQ.html 6b7ed43544ebe7bb295dbcac45e9d362e0928c1b0a3db0c6f50dfd83395f1d36 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceQ.html 2d8a33515091adca527508d71fc37c39c68893805a415c4d68e87a86aef9267f 2 @@ -10846 +10846 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html eb0f80529814baa206fbd113297b46d01c6aa15f7f5d73384bd556c8f7ac5df6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 55c560a91c3d48a3da4a61d248846554595214a917be9373d2d5bf045be61104 2 @@ -10850 +10850 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Nedelec.html 186229f9cd7d6f6ec58b911555717422e6d5ab314c4fb15c4935dfc9b9b1acc6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Nedelec.html 9fccc57fb55902ae597aed040d113f61688d4d27c9dc01161bfe891ca5cba56a 2 @@ -10852 +10852 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__NedelecSZ.html 967056251309978fbc6b74160821d37912c613b79fefcdbf868728a5ef038624 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__NedelecSZ.html 2fbbc9acf8ad5572f9347cbf639ddfbf357f4fdf683e98667bacbb63095f9af5 2 @@ -10854 +10854 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 33001c60d9de6c5e354edd4611fd189096400090767e7497dd064aac4c7ffc8a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 9d8be9c4222cbf12592d732dd613b2bb9f91957c2f22241a8e3113ddc9bdbf0d 2 @@ -10859 +10859 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Nothing.html ad7368388d051fa96e696a01b585c2f10db0957644b9f7630a328746f63c9ff6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Nothing.html 80f9f9502366805b14501079037cd4f0e952e0cc340a1ef16edb5b96938f23ad 2 @@ -10863 +10863 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__P1NC.html 3b602f4301166538ac3906bd612ed828bc379fc86a15650ef3bee1b729bc2812 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__P1NC.html 8f081949f3c4c81f3d08295b35a89f0fc6f78f78b59d8c0e099a51e66ab4963b 2 @@ -10866 +10866 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Poly.html 7cd0bd0197a7b5eafeb0628d90cff1e46336170345f1e014b37656f51c0dc202 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Poly.html 52b237443052163d8c420747f7597db47d6489c82937b98b69ea6dd1ac2a9111 2 @@ -10868 +10868 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PolyFace.html 883bb95c4e6643d585a3f587fd3d30a7da99596fd42b350da5f9537cbd8a666b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PolyFace.html 4fe0ec3a0f9a57e13bd5ba3154aa6c6828179bc524461bffea05c1c3682e978b 2 @@ -10874 +10874 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PolyTensor.html 6a79b138d52950df00543471f3d9da112fc8e18fbd41b8c7ea5e40168ea646d7 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PolyTensor.html 17d8d27526b49495b0dcace1c8d42f86b1b1457d72df6282b620e2aced2c8da0 2 @@ -10884 +10884 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidDGP.html 8222382fdf5a15bae51ebc9cdd978b7aa85ea1c44af2f4aaa5fcd33f91ef787e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidDGP.html 812d47249b19b29a59a17100b30d4c89112ae26c933b04e8d8eaa26020e65516 2 @@ -10887 +10887 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidP.html 6f847daba20ff03d069ee0ad3340fd5b69f70199b7caa161ff6aad121da2ab57 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidP.html a4751f2766abd99c18e9cc6c894a5c95d4e84260bd834b6f1ebb2b6a0274d4d4 2 @@ -10890 +10890 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidPoly.html 7309cf6e246184ce62f49b28a1e752babbb3b01e97ff7d38026234d495e61a69 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidPoly.html 8a863123d9bc8b80d55dd8d3771ec6389e50d0d205924feacc778036e1d2c952 2 @@ -10893 +10893 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q.html 
86947134f5aae163779bf7f4a0f1a9fa7664463710850c2f780e7ee5d6e23981 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q.html 5787ee78b0943dd51eaa7be9a82941b07e955cf1bb0726167f2eeb3dc95b1106 2 @@ -10895 +10895 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Base.html a247ae2c3cd42477aa5c83b489a563f0378f49579965ebf52d029d54e4eaa858 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Base.html 03edb55826232eb0658546eac79ab9f0e9c79b4df7302980d60302aede3ce90f 2 @@ -10898 +10898 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Bubbles.html 78a5ff3af0517bf12b8e9e55ae61e974d460020669894d157da0cdbd2aa72bcf 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Bubbles.html f86839aa155291fe3dfe1cdda3bd2d3d124bb0c49c7bc6c2d96892ebe74ab52b 2 @@ -10901 +10901 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__DG0.html 58149d4355d1d9841014101cb665282626dd7783dd0236916eaeb76172119ef1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__DG0.html 8c4ce11beb09d54ac53990da166ee26a5d505c2943c8bd6b02960dc39f421934 2 @@ -10904 +10904 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Hierarchical.html 3b9037ae8b5217a38979e98e063dd9134cdacbb54a098d9986889a7387e6f159 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Hierarchical.html 68cb5c9e6dd26040ad3269b969895799b7120f57a50dd43768d6be5dffc4839c 2 @@ -10908 +10908 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__iso__Q1.html 0b646aefbdb55cc34bae2fc0e9c7fed748d33f8346769e5bcb0604720f46b36e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__iso__Q1.html 4cb57d1b8c38646eaf2409715fb0673d872953321749347b9654a9b21a74cf83 2 @@ -10911 +10911 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__RT__Bubbles.html 3bc93eb17db3be6601fe2efa1885f3cfdf4bf42cd33099a87efba042f33aee78 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__RT__Bubbles.html 052e9e2bb01f604abcb1f860edf9dc4bb11f2e4e472e573d9a63187e2999b6f8 2 @@ -10914 +10914 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__RannacherTurek.html d12b7594ad58ff6f20d7cb3c23eec03df4eb5254e979e46af4602811f16f8de5 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__RannacherTurek.html eed287e4b02a75b2e98dc6da33feb74fe1215c75deacaecc020d30b84e3db633 2 @@ -10917 +10917 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__RaviartThomas.html d2d9bb436385a7cc5def0b0a391de93e5732ed25756f102a9d5817a799f4dd0a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__RaviartThomas.html 3cdf3053b0d3480dd2dc28bb28d372a86ef3f1425795859163f0b36bf66a6118 2 @@ -10919 +10919 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__RaviartThomasNodal.html a1b53cd9769a2d7471af2dc3362f96fb4d1605ee5c70a023755eb4bdc4239d7f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__RaviartThomasNodal.html 20e241b749582217e8af9edee7fe19130a122ee1850c751cc64720367df2dc6e 2 @@ -10923 +10923 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__SimplexDGP.html 19e9084072b328db38f83e489dfefde58fbdf126f4895f7c2de0e93cbfa63020 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__SimplexDGP.html 69e197b8f3c487e022a20f14c06621882eeb4a0b760d68ccc185307b9ce4d3ce 2 @@ -10926 +10926 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__SimplexP.html 6b5328c0bc8f1ae15e016eabb8b1e1ddc61b304ae9e5a3fdaab62e120743ad62 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__SimplexP.html da05d57d0e41e0c5933d7f6e6e8c9b6d6661c5d712b76b9de76cb68334bfe951 2 @@ -10928 +10928 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__SimplexP__Bubbles.html 7508014988cdb58b3375b444d20b3680155e41a482f9db02c492b064c5ae2a34 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__SimplexP__Bubbles.html 4e16bf4d1eef288bbec5375adb0702a0046fd0458a2d43f46ecedb8341b25e90 2 @@ -10932 +10932 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__SimplexPoly.html d9ba2cec38d2f7e2487e6c40953177966b919485b34ece8375b4afe06a4dcb61 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__SimplexPoly.html 9fdf539714715762479c9c75135f6ce59ec841c08be8cfaef710b6a7aeb13465 2 @@ -10935 +10935 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__TraceQ.html fd7ce89e74ad83ddb78d9579d381bc760ab803318f28687b219ce4dcb59d3c72 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__TraceQ.html 2ef5de9cc35e8265a713cad5fc9c4cc8d68deed657a6a7881e97e0f84055642e 2 @@ -10937 +10937 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 0194292e004766af468aa698eccc9a966657eb09b6cbf77e1c85e4e82e06a9e0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 5f0b86396fa2015a495357bfd5c885c4fa4c097e73c4d1c66121d7075f743f77 2 @@ -10941 +10941 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__WedgeDGP.html 1945cbeadf6b20632fb1214050bc343e33b23f2758ececc5b1f7102cc992a4c3 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__WedgeDGP.html 4fdcbc58889f56dedca9e699034dd79b8cb33fc0efd9f36eb2499d839597e574 2 @@ -10944 +10944 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__WedgeP.html b94fda4ef244418ce8c993d1f363cbd025570a220aec128adbb9f7fba5cfcec2 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__WedgeP.html 0d39447313edc0b9dcdfe979e1f019dfa414ea65cea1ab4b22c0f2ce86545d85 2 @@ -10947 +10947 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__WedgePoly.html 35b80f015c5756f4e7ed0289d43db5bd9bd2b498390d233561597e1d2812d6a1 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__WedgePoly.html 61bc46b85a86b25c7750cc6917bd43a4eb88f7d52d3c715364c6d3b8d6e80c51 2 @@ -10959 +10959 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFiniteElement.html f4cc18c8eabd591b744eeaf219af639cdd050e80d4473281defd470e62248d9e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFiniteElement.html 68e2f832c0c752788a5302030d50f041ba17eeb303cf1073e0d986c665a89796 2 @@ -10961 +10961 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFiniteElementData.html 5d4f259951672e14c74079a4209c49c3115285d44ce634b3c816d80bf89135dd 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFiniteElementData.html a0a14bc3b62b8d6247e990c8837b666224d6903df725e97d47110f7769fc1633 2 @@ -10968 +10968 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFiniteSizeHistory.html be31bc81ea894c6d006727d1e98c3868b20750d8bcd5f53d86d934cc5171823d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFiniteSizeHistory.html 0ec6a30d52ddc66cfb307cd3545674f86f31e8fd041b4a73d2a7aca621c8d0d7 2 @@ -10970 +10970 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFlatManifold.html 78462294a1e5476bd46170447c2aa18ffdebf3cf52015654de1370a36c7ed064 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFlatManifold.html 6a9cbec60432d964c1f6064e6f7cd8962d874da09c2775a6fe611637fa7bd07a 2 @@ -10973 +10973 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFullMatrix.html 0042caf0f66502d4375f7ac23d49df5fb7b64b0e0c7727e6c86f0fed9d075c1a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFullMatrix.html 34c1dbcb2bf8620fc6b0b0280259b8fd6ff33bc9c2392f87ee249d1cd6e220d1 2 @@ -10977 +10977 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunction.html db9b7e9ed49e2604af8edf929e4eb27a57ff6d47d9bba4523ef503d7bf325e3d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunction.html 
1427012fc8ae493c5414c205a100cf21a73408c6390e9116a5833e687c317ecd 2 @@ -10979 +10979 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctionDerivative.html a7858fc9cda7e75961246c652e799a44be087ad76e8509921798dac4887dc1eb 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctionDerivative.html 973a4c0394537700eafe82ea72e08114cf6c21a4b3f50701bf2a16cc2ce043ff 2 @@ -10985 +10985 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctionManifold.html e61d22fbe49549e9f2cc7bef7245f2e573b5b5b744935c409653b3f5301fd4f3 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctionManifold.html 84fe3a09910d7876de79045c093f70aa2ab14d5c98061bf54cdeda8430316f00 2 @@ -10988 +10988 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctionParser.html 003bef4caf95e82e93efac44f8292b1326cfdb802cb381a9e1a44688b2491cee 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctionParser.html c191abdd8fef37a6fc09b560c9b865a3b1e784772151ce386cb61c56dbf4b188 2 @@ -11004 +11004 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 57ea3851a70406c24d3847f7004958d9dd3d810ed2f8a636160f6325ef8c8be8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html aa2caf3c4800ea80d7ae596c51d3a9706d5a4dde2a3f8fdddc25f99d1a7838eb 2 @@ -11016 +11016 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1CutOffFunctionC1.html 8fddc8fd6f8576e0365351a6286aab05791e7718dbaa924687084c73bf5b7b0d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1CutOffFunctionC1.html 7db5cba1126c45c5dd25df8c1dd51e986294df968de32cfa30e8a1cff9f0346a 2 @@ -11019 +11019 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1CutOffFunctionCinfty.html 357b69de32e8e79eb9e00b4babd091b3a19ae1c5322da5451f960c6c27c81510 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1CutOffFunctionCinfty.html ee8dca78bebf8d1950c4fb8818b4fa2d50a7e0e6294fe2dd3a20e2584a50b6a6 2 @@ -11022 +11022 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1CutOffFunctionLinfty.html 316b1951df733608162e21b26d1c5e7504038575aa03757b38d812ea298c848b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1CutOffFunctionLinfty.html 4355aef6176017c737ada88a71727d332b941737409f6d652a7f784cd3a84208 2 @@ -11040 +11040 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1FourierCosineFunction.html fd8e9a232a9cf3d623bb2f59196770d3e204293bd31ae82e4c0995594f2ea146 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1FourierCosineFunction.html ba073e50be325074888a11481499117bf2e3b948c2ee524c4d84525a77cbd135 2 @@ -11043 +11043 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1FourierCosineSum.html b166400da5b356220e5bd797c93b5f089febbc8e4468f3c652133e5bdf06a1c1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1FourierCosineSum.html 2bcb0adbfce80f32331ef3136c145c5d59703708cc32e31bdba2fcc99e62b56a 2 @@ -11046 +11046 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1FourierSineFunction.html 9380c46611e55d26c32803f79d8310781861171ca8cd18f50d6671419364be45 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1FourierSineFunction.html de187f2d28afa6528161d2a45997b368e48c33ab9415e5c22a025326122bf4bd 2 @@ -11049 +11049 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1FourierSineSum.html c64644b714e23f4ea9c279d027ff1c4ccdbbcf0a55ff83f65839ac6d58e58ef9 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1FourierSineSum.html b8e8be311c7a71af5263c9fc46ce1904a2ff893759c940e68a6627e090fff53b 2 @@ -11058 +11058 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html c0304dff03daf76c2cfa6adfdb23b42700348d676c43d9e8b14fdab1cfbfe3a0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html a0faa83910996cae76fa7506079d01d2782809ac055995173f23e0f50dfb3204 2 @@ -11061 +11061 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html d536d04b62b46257cd5f9b90fa383e4e8924b3b89789dc4ed3329170578405f0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 0bba6a68bb9648cb0c295d3ad96ebbd91bb424c674a3292c7aad2429679df248 2 @@ -11070 +11070 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 8508bedc15444fc65e2ca3fb82635c6992ac101117f64db2c960abf8f4bc0ca2 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html ae0232d94e78639f06cf30b43deacdd64b9aaf072113f04abe0f75741e1b1b43 2 @@ -11076 +11076 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1Monomial.html 37c2232616ee604dd83070a6f6430490dc12bdb2f31f6b9e413cdebe4980c6dc 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1Monomial.html 62932ac48f43810319d3ba37ef97599f4b49facf136427b850ce165f8b264468 2 @@ -11079 +11079 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1ParsedFunction.html 5437dac0a73a3b483d0f95c162b1a2ce34918e08de04af4d86e5fe6f453f00a8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1ParsedFunction.html 1cb439bfb545a6f2c21e6335933885e9f3be23b7eaa80d98a13ff6d362bbe185 2 @@ -11085 +11085 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1PointRestriction.html a4c765eb3da4591beae2034f0791ac17a85677c639dd6bd17aad1dce7835e1bf 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1PointRestriction.html f3daf524a4a98318225b9135863f4b20b283f41aa96c37d1701994fe4d0f0a13 2 @@ -11091 +11091 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1Polynomial.html 379f931ddf921676d868212eb096c9f00856119590cbe5869865b2f50327347e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1Polynomial.html 76d3a2ad85c65829c5d3539bfc991eee78f5935be977703946c1c3e93d9be2e5 2 @@ -11097 +11097 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html f572ce0c20a073ec6927a70ee86125c2aa8d5e8575da79a794093906aa542f54 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html 50073cbd6346908ab08ff6aeb4ba54892611171a3bead9373f15e7a779879b51 2 @@ -11100 +11100 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html c24352f9f8379f9b3a9b844f5972addd1876d72cf7042a73d43568390c01a7ce 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html a293b23f40f75a453c400f57422ac049efd40a5c5c7b59f570c84b0a2e5f2a2a 2 @@ -11103 +11103 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html ba472ad3f124e200bb6c66125a938eb140f0134c3e931eca2aa7d3eac6952764 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html a5d874802041bd3b553526e651539e53b907b0140c3f797b62f97337962dedc6 2 @@ -11106 +11106 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 85c5e7c25273c638639dcad45b02461e2d0afa4b43a4e5839128fdc2396c9b1b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 12c9502adbe5e676f9c2a28af96c7f64770dbcdde9c6e59c8d4ae79816f51975 2 @@ -11109 +11109 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html b688b2e60c319a3147dd5f71c310fbbe98de277f44a45a4155dcfd1da822e605 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html b6e42f6411d2bdc838dcd30c7bf365edb3e8e723c9715e5539bd0aa5f79e643c 2 @@ -11118 +11118 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1Spherical.html e67ae085d546409cab31f0ed461b6e45363a16eb53a3368eacec1d60aebfab8a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1Spherical.html 324cde5863931e28795f0b9aa07a7376cde8d71480db20fc78f9dbc92ea20519 2 @@ -11127 +11127 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html c8e29931a3f51c26b559fd58788bfaf90ae561da89ce11bcfa8853bf613450f1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html cddc1af746f9a2abbe49606ac7b178321b210cd59057a93914ff22e150b590e7 2 @@ -11164 +11164 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classGridIn.html b6815633095d6b958605cce2f919da5f5af21a40b43084e2e48c4f1cc7c51aad 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classGridIn.html 5814acf48ae01f084e5ee55db675afffb1ad21e0f5a6b99a2206314a6bf1a84c 2 @@ -11204 +11204 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classHouseholder.html 76390f6d8c914ca08758bc94c8a945049c1e6772aa145e6fee00ce0b62b2e507 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classHouseholder.html 597f9b2801ff836c0d1bc83059caaf3e5b793f05398a246d4862d5a7c1034f62 2 @@ -11207 +11207 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classIdentityMatrix.html 20215eea36f09cb2f0cf6f5ff922c19ebb28e90c2802dd7fa4133bac58241811 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classIdentityMatrix.html 8f29c712312da541b68fbd3e7c7d19af0f5ca71fe4425f9a3e9842c2bdaab427 2 @@ -11209 +11209 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classImplicitQR.html 97b28aede7854d51410fa710c1b5495c085ba4c88d0ef50362ca5fd46c121458 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classImplicitQR.html e6497b7b42f99e96f239cb4625f39f5e7ec84fe21d41f27f58d42f28f9ec11f5 2 @@ -11212 +11212 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classIndexSet.html 37e78098a4413e6b475f2f0cd9e6cf05ed2234f0c0fd90797e0a2ae93feb6190 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classIndexSet.html f8a31973c86b6681f6956d7b1a86e873844e3de390a3e1816f35c7b22e5ac502 2 @@ -11222 +11222 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classIntegratedLegendreSZ.html 4b3ec9ddb8a71f62d87adf421548cead1ba9132441e75b088c0422b46d644e72 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classIntegratedLegendreSZ.html 76193e9d152d6ab77392292580921563284046de20792c7f87709fcbd33226da 2 @@ -11264 +11264 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classKellyErrorEstimator.html 78dd320bf57e1ac6086ce7d4b488072f355410feacb3cf6ce8a32958b06daea6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classKellyErrorEstimator.html e476a54ceefcf384df99019d664525b9f739b081e34bda0b1e94ae3c721bfda7 2 @@ -11266 +11266 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classKellyErrorEstimator_3_011_00_01spacedim_01_4.html 6af67401a820d8d75c5159be46d9d97a570783c93c0e1b9c202e6f497e56bdc4 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classKellyErrorEstimator_3_011_00_01spacedim_01_4.html 0cc687143696d7eee778a876ec05330e9892e4e265d9384c2c5fb79ea25d15ef 2 @@ -11277 +11277 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLAPACKFullMatrix.html 3efde37325bbced86736b6ecc9744ba0a9fda6c22e2cffa1d2ac0be9cc7d2a73 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLAPACKFullMatrix.html c9b60679b6e0bb1e370ddbe7f9b0b4df0b7e6872e3f6b7383e20e9388f8224a4 2 @@ -11281 +11281 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 9ef7a8e52b7d3e49adcc7b87e08c31298b05a3966b7bfa06210a2f886ecabb2e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html eca2a76b47d86a7b2fb662ba33f6011bd948e62ffc188810adde7b02df83fce1 2 @@ -11290 +11290 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 810ac0037b17a51e5dd5a3bfccac3be3f3b7ca04488a082d833674c2ab2af422 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html f6d1e21c499a891b72c03fe9f0f8f9b86c354b3db4d76aeebd156a9fe87fec08 2 @@ -11307 +11307 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 4a14192a905f93b13fa00538d541bf97a72e263065700ac8501cad2fe97fa011 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 8c79bed5e3ee6b0694a43841b638b1d4c009cbd46ab32065b8bfc33558139d86 2 @@ -11310 +11310 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 8a782e43b6c0f3077c22be0fb1a6c61c1ba16316853f0c69e4f8b953972c7b0b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 38ff348f55dbcdbb9600db7d05b09d2df513713fdda2a3ddf3d71e52b37b60e5 2 @@ -11316 +11316 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearOperator.html 1f8ae628e50055f16ab9224cb2b953a791c109a8eb1033daf6126f3321c82285 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearOperator.html 6643ed4162a905b2be40f3e41f5fb380332ddc0ad7f445e4979d02d7011df03d 2 @@ -11434 +11434 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classManifold.html fb7aa289c8de01ef26b14c8a408d67e4d2942d03f8c5fb03e1d1b9c99148ca33 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classManifold.html dad41b27bf075962cc6fe15c787d445cc98ee6e4e2ed1e17d0841dbd8ca721d1 2 @@ -11437 +11437 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMapping.html db68289b886ac9b84be3c8aaedfd718b2fae83a381f0f9ebeddfb8a3adb40fa7 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMapping.html 63f3dadde9f9b566e94c205ca4ad6da12dd5e312437118e5fbd321067e8e8d52 2 @@ -11439 +11439 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingC1.html efb478147dfecb6888dbdd90168429365aee665b2f18cd2af31eafdf31e6bdca 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingC1.html 380ba641c2ec85e41bd9d97b4f7b4bd83b10f7768d0623381233859a605e4169 2 @@ -11442 +11442 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingCartesian.html cc1861b919f65db6648dc357d5c9b6cac02767023e69c20e9519103781890070 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingCartesian.html 5b1f393cc8f18402c3e94d5073b131bd51ec8a3aa11bc2a095f89765c4952c67 2 @@ -11448 +11448 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingFE.html d4acbc51288494f4b3d4ce8a85b360d2be6b86d67a0086f7251fa731f7689754 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingFE.html ea52f5728160095f39109fb2fe563582864eb785f0b9886d0ebf0cc22f8108ad 2 @@ -11450 +11450 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingFEField.html 80f91a91aaf5b8b906ceee00b5c4792b3e78640c0490d0daa56a5818a10de237 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingFEField.html 9b3297cf187a50d623c04f87cb3a61ce19191773cd79570f81d97ab0d6bb62df 2 @@ -11452 +11452 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingFEField_1_1InternalData.html d0ff134e480e376f272d6973305a3deb718e98a3be0f93d4e150f3f4c57464d4 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingFEField_1_1InternalData.html 
67d3893fb7ab3738e617b934553849b6a54c5d9fe1f23e49332d7629c753346a 2 @@ -11456 +11456 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingFE_1_1InternalData.html 40b43841361da085f87281e9454ba4b2241e3f0316154206437a7dc2900ca3c4 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingFE_1_1InternalData.html 4408f06dac6b36f0d38afb2c02ed50362db9ac328902461b1416da169063200f 2 @@ -11460 +11460 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingManifold.html 29496bfed2e1b0ae76ba7e57b778170e52ab278092e2a50c1e05cd3f5ab9aacc 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingManifold.html 02b39c9e50a3578f5f1398eed97d647e158db066ba59d64f7eb830d65cdcb0c8 2 @@ -11462 +11462 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingManifold_1_1InternalData.html 868ed1925a362a12921d76719e0b5850e54fe317bb2535aaa2213afbc43f7f52 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingManifold_1_1InternalData.html 87e88c9775d9cd77622efbbc3f9637fb5eb0eeb7753fcf84db72befa2c7765e4 2 @@ -11467 +11467 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingQ.html 2cca9fafa0aa88403825d3f89c2d5b32ec928593d292c12c119138aa923ac999 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingQ.html 5e3b2d510fda059440b6bd5b3961cb2b452c878eec99ce78a1dc30582ce21dea 2 @@ -11469 +11469 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingQ1.html d09549df83a13d1e5837321e295c510a5b69527dc6e7e41f1012898c1531480e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingQ1.html b439612386864b2ceec36d24c3933376f585fcc8b5c0ae620cc7f825f81bfb75 2 @@ -11471 +11471 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingQ1Eulerian.html a7c85f28782bd1742a55fdb1f3652070ee485b4299122bf5ddfec4ff89e566d9 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingQ1Eulerian.html 
1bee44062c9dece0e3d12f942e829139ed8f8d2658e5e4b65dbc4d94cfdfe3b5 2 @@ -11475 +11475 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingQCache.html a0a9cd3186ac0feebac7e06267bdd7ff5167b4fe999ac1b81d3823f7afea64b3 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingQCache.html 6dfb1c55b354b8ce62a7acd031e0f6bb261599153d2c6da07cb9158fa4035d5f 2 @@ -11478 +11478 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingQEulerian.html f6311bca11329dba9b674df021c9ae4d27b34d921c9ad256e2bbd6a875c01935 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingQEulerian.html 697f615f22f5a64023417064f8376abac895d4bbfe88b71172d00de0db1e0100 2 @@ -11484 +11484 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingQ_1_1InternalData.html 9be567d96c5fd20c19eb1a44959f50dd552b338aa2110e9531df185a2b31bd60 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMappingQ_1_1InternalData.html 4a7020479759fb0498bb15b9beb965fcd7ececa6af7dec1216d562c61e1a95ca 2 @@ -11506 +11506 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMatrixFreeOperators_1_1LaplaceOperator.html abb76fdc61f2ad1bdcc852e078ca9163334c16732cec26af1c984510d337c186 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMatrixFreeOperators_1_1LaplaceOperator.html d22e269127a8b65d0f448c2570393b568d4f463cf3cf1c373e8d6f4e0864e81f 2 @@ -11551 +11551 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMeshWorker_1_1Assembler_1_1ResidualLocalBlocksToGlobalBlocks.html 453d89ce0a292fa943539d861f9696f608b076ea5a2f984cb915ec00cc8992cb 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMeshWorker_1_1Assembler_1_1ResidualLocalBlocksToGlobalBlocks.html 9b868bde1d696f97a0acf485a40a89d7302e8e872ddb9700aa356e8b8f97449e 2 @@ -11561 +11561 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMeshWorker_1_1DoFInfoBox.html 048c8ae851a9596484eb0a5d91c82c67cd44479d77d1860223284864e28066e0 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMeshWorker_1_1DoFInfoBox.html 27283eed4ef4232e9ed2bcf03f7f7640f921d089ac2b5afafe07b8e489ef78d8 2 @@ -11606 +11606 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html ab2b2b121613f454a1d02cc5d4961d656a59a31eb0e4ef40f54fe53f57894b92 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 9c7d37c4f746e224ceb935ad95acc557fcae4d2093d63143aee302d17641cab6 2 @@ -11609 +11609 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html 1b892a33aca1cef7ff16b62e098560f3c9fba705fa8e58a11b7ca266fc06e130 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html 1295bce1b73582ff331132255af6aac212bc4c7a729637d44816c3fde80c4bb9 2 @@ -11612 +11612 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 632875d8f9c4c97629676dc5af228acbbef709ad64fddbca8fbfc4568b2b2cd0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 824ec710396111fb28f4eebac5bee8dd731998b9a01154107538c2259eb9e91d 2 @@ -11615 +11615 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 21ef4f9a42498434a6b7acb78bd22109411097a57ec8cdb5662eb9a8b528fb7b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html c7afffb48d7c282853df071948012efea708dbec048d087ea5ba1a08a2a0e4c9 2 @@ -11617 +11617 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FEValues.html 5e6a9caef965192d8e434c9171fc4eb0c23aef44f228823833aff0c0539535da 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FEValues.html 8839ad52280ba8deb1b247f026a48a781bc7f3641849c1b539c5eb0ec6a9df09 2 @@ -11619 +11619 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 030b33538edad67a26ccc2ab2bd9eb6aa794b6c6510464db4cd8df5470995140 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 2e814d5cce40f66e376b4de58f50d32586d2e89ae3d1fe03e914a5d5e7ca8fd6 2 @@ -11621 +11621 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html e26535e743e30c4b4c4639415912c9e10a9be8c74b3c4745a98096dae9903887 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 73fb3a99933b6b221448c4044f77108b4366db61761c62139c7f95025e935800 2 @@ -11624 +11624 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 29fbfea86011922e26344e53d9e5e836675a57d188f74c306592ce8d021729a3 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html abecfe02f0d83e4562d3138e6efe9e223d281d552b0ee72af5f36a69c765b956 2 @@ -11633 +11633 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html bc2744ff6fb47c6838f26abea7bb3b36696a12a5b26fb40a69b561b2b3b18a3b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 957fa0c3ad4621c864d3817d9de642b64fbaee38bf60e9930032d1c5e114c718 2 @@ -11641 +11641 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html deaafcd194c88597ef6316c44f02c08e206d9765fcc602e1dfcd8d822b1f2f97 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 4ca571d144bc378e1d837fd32fea5ababa315aad6b85c1b5a49dfd4e82f55664 2 @@ -11656 +11656 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html bb0f9d02c09dd68ffbce0373be8416721ba3fbb66a69934ab1250431911af54b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html d69829970add8547f0bb1b06df7c191ecaee2a33e24a7d832bc97359239fca2a 2 @@ -11661 +11661 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html cb75c65769e82397acd1becf1e9b05feb89ddbc15bc6464da19c52d276eb955d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html b9d557d4aae75afa32b7a99358c9a9980d8f9ef716fdf906b89ce05292febca5 2 @@ -11665 +11665 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html ec04db9c8ffc15b6edd00f5d00e95e78d7d1aa109d45819d15a967cba144067a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 23d55b603422b882e85e9c90cba479d678419421bdf71897529116bd48aded96 2 @@ -11667 +11667 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html a4fc3e88f531e49bad1aa85de8d72e912af0f9298f658ce1c5f2174af0299c7e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html d0a3d2435c0b69ef959b1909884b97b48f4ab537b373d7b450cf70b9436666ed 2 @@ -11669 +11669 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 
466e5d491d47c899459cc21aa44b2e0bfe2113cb9d4182b5024f6f28cb50b241 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 83f7f467403ce9549f38a73a6d4b839ea8d583ad078c3f528cd04c8a3f131954 2 @@ -11671 +11671 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonlinearSolverSelector.html 6dac9b80a5f1e2e1a255b1f2a8ffe88adaa4ef9a5c250e49b1bf71b0faad4199 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonlinearSolverSelector.html 94b2d27c9402170a3b3af58b30974f0d982f59c2f5310e526335fbdc998fc608 2 @@ -11673 +11673 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html da1cb691c4d06513c489ffa93fff3e44261ee5ecbd373aaa3a096587270f0e9d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 40f25b1024ef3a9017b6d4967c3c72a1b8571043c628c29526da3452b706de58 2 @@ -11675 +11675 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 82dbe067f8b280a43729e54458b55936b22e8c6179501b3030edeb80b2d4841a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 20ca2a916a37a8f86bb0a5fa9d26e79c45e739fdfd2a77451b5a37c460accd39 2 @@ -11678 +11678 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 9a19099523375a6f33c2eee7638f1a2dbd52683abfbaea7effdb0629da05de1a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 4784e252a440775cfcb0fbd5b5ec10abf67730ef847fd58ada56a36a2ab53ce3 2 @@ -11681 +11681 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html d62762c5f86cde71e9409c2fd075af357bc7ae8f696843729fb447ad279a38d7 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 4d8b528c6e6dbe5b2a8793c173da786ff3f1f661d3820e88bba5f2434c2f0299 2 @@ -11684 +11684 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 0c9ad63bc70d7fa8668108eaed74aa8e11fd4ff5a8e23030f8e64fb11576d9b5 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html fb514dfa134c09ea71fb8a52527067188dc66e19b3e3ec14ff1ed8a59702cf68 2 @@ -11687 +11687 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html fa6316a072ca3d42e27dd9c37acd34b8e08cfe1a8f86a30f7fca099b2972cfb8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 94a3f247d7c0d40ef81da35dabc632a3d684b8fad94bd255e76618e6f36181dd 2 @@ -11690 +11690 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPArpackSolver.html 5ba40233aca44ed3788a15657aed24525fdd5ca0f150b10368627a9c2cfaa2ec 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPArpackSolver.html 735d8517e8823fab95a989df0edb2d358702b2fc02dd60b3d7c0a9c7f3e2c836 2 @@ -11693 +11693 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html ab034b8d8dd660071f01df096eafd5f3afe35396bb02d1dcad2b57778fc42a14 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html a4de49db6065f02fac8d69c31b530aeee9f95b96ce56741d5302cad02e22519d 2 @@ -11696 +11696 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html fc82c15c7a6a952bf0652941801e9728f565744f3e0d23d21f6d3376735011f8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html a2cde485e67130571afd0ae17b5476252f84532980a258cbfd4ce1f6e6234953 2 @@ -11699 +11699 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html e1c31b5a31ff0d8d937af7716ceb82a0745e4ef26d4b74b0a141e8e4456cd4c5 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html df7f6bddc17a6a08e675172a938a2177f08ed176966024395e6ac33a950b6a92 2 @@ -11702 +11702 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 41804a59e72c91b183168c27ac212ba06963482a1fa742f60753eeba4341e025 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html c7839c5cdbc5fe7f19fccbcc094819fc42b36d1219921cc431a3939aaaa68976 2 @@ -11705 +11705 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html f8570224c638e05d62a5498f89103e0869ae8eff31ac36defa0df3f00652ef30 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 3561602f12a7f8c1c0da84626c52776f9b7dbe39f3288c3d4d5d8d7e6ba52f80 2 @@ -11708 +11708 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html d8ae54384086e8e9ae435d176e26b0bfb1b77fdd4d030ade177acd4832e5a74a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 859366cabac3509c6eb6d58b5d6921ab87d9e4189ccef3dea93133060ebed690 2 @@ -11711 +11711 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html eb9a7577c98eb4364cecbc55313e57e2dbe88064921d95b10dff502a13c12388 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html aee32fd85cd5b4efbace50f55e0a0dec9abf31c1677bf54a92ae724e071a19c7 2 @@ -11714 +11714 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 1cba2e044c6cf5da91c9abd156a43e11cda74235865598f5789bb1fb746b51aa 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html e8070fe059850c051885714a61713c79aba913a83b73cd7311b5d0aa74b87087 2 @@ -11721 +11721 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html d3050bf137bf3dc4c30fcf7f510c4506d1cf1d6a5f5ece9b585f6f7ff99e1364 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 00b1e0b34d98ae0ba370b0c49bbff12c13647420eab3349fbc154f3ec0163d17 2 @@ -11809 +11809 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 32101a9c9b32fe02dab6b43a3eabde3596e1a92e467f1c57025249807a26bd78 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html b587f5dc277951f6fc49c14ce7d2aec57437b1cc23aa75fbeaa0a32a5f66c304 2 @@ -11812 +11812 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 2a0dd80fbff4396be2c71972a8c1471fb74232a643c1b4c3ddeb5ad9e4aeed4f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 4a9033966f44b9a563341fcbd5905a02f2a255567552b652b4ff37ee4021661e 2 @@ -11816 +11816 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html c451a7285662e85a00060a85f9dc75ed4feb69ca03629d0dfeacdef22b1483c8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html e51c83a831f84ec5552c2a48164b4a464fe1c594bac4cc737701f663fe37d37e 2 @@ -11821 +11821 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPackagedOperation.html 73e41290df0e2b3fc7fc3d1d902fb4bcee6ca72d32a779012c8e2cc15bc86414 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPackagedOperation.html e0dd8cc40c02a5c72abf73789f5a351d75e5530d1be0afce88151a5d544d6ee0 2 @@ -11842 +11842 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classParticles_1_1ParticleHandler.html 
5ed59b79129de5c639cca8b6e3f13f855ce19d24964de07548604b70cc3a9586 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classParticles_1_1ParticleHandler.html c942db39c0b790753b0962749f922aa7b2e1c7e45a139c4a9d597a76634266bc 2 @@ -11847 +11847 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classParticles_1_1PropertyPool.html d68924761b2c718e254ffb40945104bc9b6f7c1d20ef411b708fcd013fd5d77f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classParticles_1_1PropertyPool.html 9a5c3036b716331ed95c7e260faefae9d7c3f6d47749aafd0f97658e4d8ed8eb 2 @@ -11888 +11888 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPersistentTriangulation.html 59c2b70ce268faf49f27039003dd05d80bd1d9c2a3432ba95da60abc45f7570a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPersistentTriangulation.html 94adb56ef9a45557abfa65a6fc1c232f65fabe148d586f6cfca975bcb472ed9e 2 @@ -11891 +11891 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPhysics_1_1Elasticity_1_1StandardTensors.html a1f186ba42d63fb00c9f353df88ed09c7c5e894583bd2bbf3a503d18166219ef 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPhysics_1_1Elasticity_1_1StandardTensors.html c57fef91e215215909ef30f8edf5b1f9bd9f7722d0b33a90e09ddbb17242dc30 2 @@ -11893 +11893 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPoint.html d16846dd1b828fb4d515ac12322440544205f1ac6cbf16f4709aea4b5f09697b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPoint.html 26b283c91176345c0608e3f6adde62da924bb39fa2a5dec4437ffc63fffe1224 2 @@ -11898 +11898 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolarManifold.html 6b3f87243c653056e8962a558cd9517252872497eeb00eed5ca6e2c641feb82f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolarManifold.html 08884b81fe03636eeab57fad935f73e6e542a490023a3ee668acde0d9f8ee1bc 2 @@ -11913 +11913 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsBernardiRaugel.html 754af85054bb13d02767f1d1fa3682e40cb3cb64c8c3db0289d286f0c7668b05 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsBernardiRaugel.html 6b5cbb89229aa08fa0dd285c8517a6363953bc41794b7700c5e7e63642df0907 2 @@ -11916 +11916 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsBernstein.html 802076e91fab27440d27eda0603e30d6127e47570493827c535540c25f50e446 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsBernstein.html 1e6767efbaf0c5bdbe696f3de5a011827887a9a5ac3b69c6f26ae64aaa1b0be0 2 @@ -11925 +11925 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsRT__Bubbles.html 05a3d8281b16dd34deb16c5530ac17aa8ac4810394d4bce6896db0aef68ec8dc 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsRT__Bubbles.html 34d15e64ae2816e49a8b339ea540c1f3a82898df06785a541476f8483fed195a 2 @@ -11934 +11934 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html cbdb7d27ee99af8c60c1e95bcf20124cdfbd8bfeeef7f625663fc0da8e8e4f72 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 8ac14460c2c9503b4f63a29c23c52baeeff404d35ebf632d9dc8b969773fa6a2 2 @@ -11937 +11937 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 1ac9d5d04d029254f58e21b5d2f597ce2d6ed05470dfe6840a391c9b5557f746 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 8507d8a27fa436d7e8b28ed998a15c4d367c1385afb96460f0f122a1b6fc29c2 2 @@ -11940 +11940 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 4b1e2c9ac0757efde6c5ab3c2b0fce27766d6a915daf1cab20ce1338754922a9 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 
286521e7e73553b572466eb0ae86211cdbcc252e90f1b63ef8452f5dfcde1be2 2 @@ -11943 +11943 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 3388dc73bf206a012ec0980b6bce1507259c7e1dc9d30eeaeb9d5567fed1e376 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html e19cba533fb5e08327d0dd52447adf53fd28fa9b95ef1f932208ab3ff72273a8 2 @@ -11946 +11946 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Legendre.html e98c5ac382bde9263461fcb11f5d9f87407925d438fe1c116adf755a688572e2 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Legendre.html 5d3997ee2030e672f7261fef2477a499b6f5a974be6d771cbb368b719718116b 2 @@ -11949 +11949 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Lobatto.html 24665a171f9723f214fa321fe2fa4c3864e54a2a32c6b8336c859de2a8752884 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Lobatto.html ef340f53522952db83263e44436451e58a691e25eb11fbc8b27bb37f1ac6995f 2 @@ -11952 +11952 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Monomial.html 5780b441b94c2ebb6a4b697c5d3aa605c76b22da0fc53ebc04ec1e89c7b0d445 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Monomial.html 72741df4411b810eefe5c5e98f491d225583dd0451b0be3c29babd187e4bda9f 2 @@ -11955 +11955 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1PiecewisePolynomial.html ef64b9c910509977b18b9f0abe4d738cd259341d697cba1aff9460510ab847e6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1PiecewisePolynomial.html b7497e85db166e0f6c34630231c3edc746b422b5a577f1a568a59b5d7b8e2dbe 2 @@ -11958 +11958 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Polynomial.html 1afac6cad85ede044de3d5918566135f95af4f1e23b179743ae3b995dd8f0c68 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Polynomial.html 6bcfe0076a55496e2c49c8b8133043505b177219432f9b9e84ab2f578bd761aa 2 @@ -11961 +11961 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 16b9b9f666862daa696e73cf84f953b00963d20c79f0e1638c0c496fbf1b0ed4 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html c6c46a5db5edff5c66d82f53e2e75cf6dfbcae3e6d041d9e70e520f1819cb920 2 @@ -12038 +12038 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussChebyshev.html b35bfe1d462c40dbc591d75c3f67898d6ed798d80569c4b196ba0717b449bcab 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussChebyshev.html eeff7d0eae0fa4b39fd138caf9b250e21c98a8501cc446bd5a3ffb090b7b9e40 2 @@ -12041 +12041 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLobatto.html 6cbe4947dc242c99940b02277ef10b1213ae38cb43fac5f0d10d7c49216400a7 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLobatto.html b4366dfcd6068b6e986912a48a87b66b2d871a857190645a37a6d6c7569fbbeb 2 @@ -12043 +12043 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLobattoChebyshev.html c2da59e28877ac0bc6a8787147b77b1840ccdd6f186f00c9752ddb228050a431 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLobattoChebyshev.html cf23180c91280443ee116f0477e76739448fee93231eb4744faaa80cb0c9cf2a 2 @@ -12047 +12047 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLog.html efafb4fbf48ee2639b9832e557d46ebc496c4450f967dea40bd9c572088c87c6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLog.html 9f888ee3e5bfeb82ec07dc0115daf4897a77827a306db589048d610d835f60e8 2 @@ -12049 +12049 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLogR.html 5f0c2d40e3b58b63b6e60f46f5a611fe077c785dc0d1c70b45285c4f46350b5a 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLogR.html 9df11e160ce296151e3fd98cc1994c4c9c48370b676c447fb4d7e9275ade12ca 2 @@ -12053 +12053 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussOneOverR.html 71be7edade5f8319dd8fb90637898a7037d5eb585c537111fd846b8eda1a97ab 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussOneOverR.html be6f30b369a3c05a9086efc46c2f3862b99c5ebf0b9e00dfcc382c23f3f3df56 2 @@ -12059 +12059 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussRadauChebyshev.html 63593cd7ff8cdae3ac9f1a24f6d7bfe4c07d3cd5f0ee96fc560f9466bd151335 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussRadauChebyshev.html f05c542cb3c61d7e18c58b6321b2e94feff4e8ff4f0892b77ec41ec46f856653 2 @@ -12085 +12085 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQR.html c88b783c7c23cbf4947a88cfce95b7a87d4982152c48d4488093a94708eeebf4 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQR.html b3e917932f9cd404d34e33ceda6f878541cd775c0fba4bf11b57867327082548 2 @@ -12100 +12100 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQTelles.html 2bcd79fe525c2a0b7e335047228f56b96afdba1bae93fe4c8bbe914dd6d17f44 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQTelles.html abbce14e4c0df66c524872ee573d6f333ad5ff9f5a78a31a2215fe9a1d233a26 2 @@ -12112 +12112 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQWitherdenVincentSimplex.html 53dff08df71cb2a78218d381a730147e6158e44a55d573cbf68efcc4014add2c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQWitherdenVincentSimplex.html 3b86a05c24f7713bfab2cbbd7af54e39e939140a11726f1338ede10c75b44024 2 @@ -12115 +12115 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQuadrature.html 5ee8173dec8a0f3f17cd387e908fffb09dd2df8e64aa683e9f483016b0b47254 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQuadrature.html 
753b5822947f20f314243b047c8e001ba29de9b6c847f9212aec450d56182f67 2 @@ -12126 +12126 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classReferenceCell.html 9cc867f400670aa746a7f855e603b17e2416f2abba345a2664c31f907bce8aa1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classReferenceCell.html f9e0d92fede12ae90aba193978d76009d44a1ceead0c31b2e399a57aed23d1f9 2 @@ -12161 +12161 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classRol_1_1VectorAdaptor.html 19c7efe00ccefbb8a50b7547a294cbfdde4ba52023ed36eaf7e564399503df5e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classRol_1_1VectorAdaptor.html a1253ca126d0161beb33a53aedf95328a3d69bcf0be28690d766c2489e97154d 2 @@ -12164 +12164 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html b6397ac1fb054eb0c757850c0555c81772fe1a90e4e1206a195ba7a985b33a23 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html 9125081e63faad6c32a126d6e9e8a1bfa49151952ad7c4c95dbaead08dc3a530 2 @@ -12167 +12167 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html ded95b79dfe0a47bfad1a0cbf1cc5fe3da1f72e5903928494aaf4343fb65363b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 0d624efa58e0a87fd724a4068dc6a911ab37d10e0dfcfc494598ff90436d3cde 2 @@ -12170 +12170 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html c19dc4e88866124b2caa1c2c0082c4a42d514ec90e07ccc924a4bac3ce22ddfd 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html 28d44f3efe299041ed05935e98966bd17b4b94514b91bcb560868d3c2495786d 2 @@ -12173 +12173 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html dd46c46924e125c47f76b128d9a36cf165a8dff9f4dd555f4ed91b7eed4b5a23 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html acbf3a2c672681f0510a892d0092d373f9ccc8e69f8649562529d428a4fc2446 2 @@ -12176 +12176 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 1ec8c94a71da6998de9682d0b4111b8a21144e280173cde995a71785e9a2b4a1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 6b01e59f0f0d9cfe02c2c0a2110e35d286d734bb3feeaa082962ccc65ff8ea35 2 @@ -12179 +12179 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html 9b5ea7e3b1f3819cf16d5e570ef421a37b21628e3787307dded727f3075eecf6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html 963043c610dddaf8dfe7a136e5a78cac4eb85ee48220601c0164814a1cd62b51 2 @@ -12182 +12182 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 80471834d180abde42c07d1f1e5cae6ca133145f4c9972c20c70738350217760 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 12105aa86de3302c15230bf21104da7eb97cd2efc4efc504d23f52fab73602e3 2 @@ -12185 +12185 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html fda3fce93624d09273f41fe805e9009fe796fb4bfb973d7038da6e58c66e0169 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html 8a2819fdbc029f4ed3e3890eef29648eec065b50b4ee9876cb00d8144b1c2e7c 2 @@ -12204 +12204 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1ARKode.html bb6c4e7411ef7e3059f9eb756656e461248e85ee8bba049d0f2e778595f971f0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 7cbb61b7a6120f86294db994885fbd66a34f78f02f3f9d342a2145db1b173ad8 2 @@ -12208 +12208 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1IDA.html 6586262409c9b26b0231f6f83991d1e9ab8736105a47b3b1b51939fb408e1616 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1IDA.html d8cf6daaef8f13214fd3b2aa8cdc151a06dabfd8679b9e3e468854850cee601c 2 @@ -12210 +12210 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html f7bee5ea24976218ebd519df602b2b96bb1d18601f8705acff06d83874043602 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 8c22fc3ecf15a54d2f9b3c597975a4dc16c369e269b4be23570a519b19773d55 2 @@ -12212 +12212 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 53749a0ae7ae902dbfff7651733a3b454d4be56655d1f95a883402f8e0652ab4 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html ea524320c15dc8e44f972942d618df674ccf27e4d58f13d9a01ecf177913e8d4 2 @@ -12223 +12223 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classScaLAPACKMatrix.html fc04a14fe2f0c6c98823dd54717284b9f4dc90ba53e0f65df7115a24d03e7612 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classScaLAPACKMatrix.html 879916b9779d1578e86178b53dd6bda7f4fbbc43bc7a5c9c1e2c4b1c648ca938 2 @@ -12226 +12226 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 77fe98fb59c44003a5e113109b7fbec6a4b85e3d96d5db870b3dcc2a729b5b94 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classScalarFunctionFromFunctionObject.html ca6399e35143e9f87a354d63fe90a1f481857ed90f9f634dc7839e17c90775cf 2 @@ -12242 +12242 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolutionTransfer.html c1a3aaf236dad62d697987ad1388510dcb284f5439e2c6768f93e49852b8c614 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolutionTransfer.html e94d16b4d2d43bdb191001836364f17b97daca0254776b7f722f6b21aa138bdb 2 @@ -12244 +12244 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverBFGS.html f4eb63d75f46451d2d70aebe7efea72c324a48e97f871e5c1b412b3d3d527c04 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverBFGS.html 01a28167b9dbe5026ed6af9d86a2616087b64d27a9f3c541a97175054afa5e32 2 @@ -12253 +12253 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverCG.html 0b3f4351e6b54704143782edc1dbc2cedcd605385c04825c49f07b987d42f4e2 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverCG.html 85993e7f5b19bb3e77eb1c50a12d1e60e466baa9af455dcd85218a45f2f43d9e 2 @@ -12262 +12262 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFGMRES.html 0698b0a40c3bae0522a1a614718edd3069c88b248de2db53090b1f4129ab6202 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFGMRES.html 469aad742c0575c9b671469fc72b8daad727103a84f6213b3c561bcd3cfccafe 2 @@ -12265 +12265 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFIRE.html 7133053c61b7a5bc9ebfe7a5a9cc081a9d12fd1774b65ae5d8842f98c858366f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFIRE.html 8ed3c7404b8cda8c50226cb180c1d83e5fa5f2637b44f42e6d102f48938fe7a5 2 @@ -12268 +12268 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFlexibleCG.html 8597bb4321ebdd0f92941ec85d6941e005480589496372378616d12e37a8ca09 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFlexibleCG.html e89dfb57ef7c32306cd81dec77d0424e2856988764ec73d9622438085d519c41 2 @@ -12271 +12271 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverGMRES.html a6174bbcd8d071a006726f3fbff4c9508759a0237ac4c558d5bc5a92f952251a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverGMRES.html ef7192dbed2c861cc334e7cd6fd20dc26fcef1a0ca289d46bc3a37e3c650c5cb 2 @@ -12277 +12277 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverMinRes.html 
8d93cc711596ddc34c68d024ee53b0be8fa91cb4a149e16631bd4cf7de16a73a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverMinRes.html b354c88c23d7dd67902f4a10b90c269445b545ce2e7c5652db0bdab2a2d52bb6 2 @@ -12280 +12280 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverQMRS.html 07c85cd487d432cd3a9d95b0ab2e99623ba6e6a627ba6f179b3444bf064b5449 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverQMRS.html d3713dd4e365c6446d543d63146a777a249d91e9abfa7920324d0b3bd6f1b784 2 @@ -12286 +12286 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverRichardson.html 04ae788d9922a822503103070ab62903cc6709dd80d44da297e1d3132e105c11 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverRichardson.html ac3ada1e2f526b7a6bc1dcf9f1a3ee0ac531f0ea17293256af104167cbcd5882 2 @@ -12298 +12298 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseDirectUMFPACK.html c0b56607ed9ef0ce1c3d562766326578ffb40d57aace07a83340a77f4f937631 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseDirectUMFPACK.html 8d6cc2d73ae0061aa4d5f6a06107d1ed48d14c53859a3a73f0cace4112a5fcef 2 @@ -12302 +12302 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseILU.html ce4c1c86bb478c522a91bf7ca2a7098573c3ccab61c46b6c557b7d67882c9ae8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseILU.html a6e36f6ad03f092ef5ba2992173db1b7c21fbc6a15d4ff78aa7c31926d9704d3 2 @@ -12305 +12305 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseLUDecomposition.html f6f691923d667f71379350a6dfc4e5a72816928e74db3062b71d3a1da57fb713 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseLUDecomposition.html 12f93c6c70dceed71a1863776995085809eef2e42dc4c7faec704d091269a35e 2 @@ -12310 +12310 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMIC.html 17f9dd41212ae26448f66ec47f2ce5b3d1b39992a04f90fa048962e6fca81c9c 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMIC.html 8f53e79f03154c00e7e8a278565140557e990aa5b09b0540029891619dc82e76 2 @@ -12313 +12313 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrix.html 865abf6b07d67c2789f2047f89a5dce37d363359472a8a28133f329fd8dcfb4f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrix.html 73a02cdb62d56e269ea096d57f6cac993c957d14188a8fc6d3996b550fa424ef 2 @@ -12315 +12315 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrixEZ.html 7c2a65501f125efe20b8288605620daf8a388dad9ac5e9b0e67134898541bb9c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrixEZ.html 0d0924549f2648e69f47d2be486388a1b7917b5ebe423608437d0459e1cd6200 2 @@ -12333 +12333 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 8d9191f85901d52d7b1f7a5caf5ed1089341a66d150b4a4689cb4a8b7cf59d4e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 770397bd19ec48abeaae10079300dde97a4498f0a2e0f331ace7492f6aa517ee 2 @@ -12341 +12341 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparsityPattern.html 9f6b1d4ea10fc8bd8dbb1631d251d2552003dace5c8fc5a1a3927864b427b5a4 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparsityPattern.html 42e0eae0b7cdf144d65ce92edf30313354ba0be619c09f49eedc1c9171b5f8f1 2 @@ -12349 +12349 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 456a01761cb4407b688f42fae59b0c4f12af76c3b00b93fdf0ed7910f615f15b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html b742e380f05ddb8d6e5ad59e5b128a099a132ca55c400b08067ca13f386e3787 2 @@ -12353 +12353 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSphericalManifold.html 5be7263b9bcdffbb3839005966492ca40b0a24036414e104e095f33b55ca5887 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSphericalManifold.html ad03f9934287e2f749f3a0304215dfdca1d283ec242f5cbbdeb6fd50bd3902b9 2 @@ -12361 +12361 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSymmetricTensor.html aa2faf774ca5b7de23b67abdeef9d69128d46526665210902e2c012f139d1003 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSymmetricTensor.html d23392198009135b5e018046ccd068eefbf86aad5a54f00025477b0b226fe744 2 @@ -12365 +12365 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTableBase.html 3e9a231f8c983519b3f81f1519b954a750461c6c8d05d66c09cfac08213cd48d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTableBase.html 3e104a4474d827453a551f30b49f7e59827c1361e1e4216bc97eea69851f13e6 2 @@ -12368 +12368 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTableHandler.html 82bcd51c8ee4d6c0431b12a78cc5202ccd3e969558c70d0b58c4f4e7b950979f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTableHandler.html 07dd10192f234a1efd2094043b69aafabbf05e0b44fbdd79501b635a4ef41735 2 @@ -12396 +12396 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTensor.html baa7d90043831ba962cfe4a97ec8cf871550ef7319925056bbf68d462ea38886 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTensor.html 59129fab7d9a7b80f465124bd4ca40ddfba910678f03e33cd360c33b0e034f00 2 @@ -12435 +12435 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTensorProductManifold.html 9c0b2b61b243397a85a447493ccb9387b6cec89d0d4f76336abec54bb090b62c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTensorProductManifold.html dfdd41cd5134d33a1a5a4ca6ac28c8bf692084a5a2fc1d2ccc2bb8eef439019d 2 @@ -12438 +12438 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html a87e7acf0b7b9f088c217fe9894a4afad17808e7ad5e19f8cdc2503fc5a0c33e 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 1adaa1e87cfa9bea075808a38577d6d7873bcc55454ed4c5f79317e047aa037c 2 @@ -12444 +12444 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTensorProductPolynomialsBubbles.html 491aeb962c9fe404c13523c671891551c07e17cedf35dbcb5da702d83a639070 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTensorProductPolynomialsBubbles.html 1ff88a1f6f5c4302e8bff8fd077356d0654aee24da21afb9356fc256714b5192 2 @@ -12520 +12520 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTorusManifold.html 57a2d8863b0f4fe69a96a2eb6a90d90c6cd158d166a1ffd5f300212af49c7d85 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTorusManifold.html 822fbbd4587937066ce6e4bd7a4c771d033b3fcf14de3e3ae835c980d18c29ae 2 @@ -12525 +12525 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTransfiniteInterpolationManifold.html bd4274c7a1e0e68cfcad52f561b7a4a7683eaf1b57bcfe5b3ad2dd6c07cafa9f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTransfiniteInterpolationManifold.html 05d81869089039d143c1697c286bef8cc79d3cc9544071a93eaaaf1d8b9823d4 2 @@ -12531 +12531 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTriaAccessor.html 499b7858a8d18151dae2f557839e1dac405bc062e12ef2ddb2bac818e9e2230f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTriaAccessor.html ec4f2567b923d7b6b6c320f9d8acd4d64b501c82e90c463435bea64c95d2873b 2 @@ -12536 +12536 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html c73840247eae154fd1a3105cfb2a8e3f5bcbf4afb48d9208eb09a9e0a6edd3fc 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 6bae7ec4a63e862b92d7ba854e07696fef86698db515474eb260233d48b19ca5 2 @@ -12539 +12539 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 47f6e0f6de88c108a490c1bb9d01a0e61088b79676eb6b001546caaf345ea77f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 263ff72099ea63bab15a3690028f9103d8f23cd1180351e06bd18b3bf30272dd 2 @@ -12551 +12551 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTriangulation.html 08d6119f94b448b37e437e3fdcdc45d7375329d9d11374e57a41ac2ef01901df 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTriangulation.html 29d10673dac999d3493dd81bfdea94f7a446f2e4aeeca9566d30dca475480aa7 2 @@ -12558 +12558 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 3abdf441794402bcbab919ed2370301f38c01cefd4f914ebb3b868373d3e49fb 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html e03c0c16f44937d2fcf3380967bce009eeb3add5c071bf91deaacb0abe8f48e7 2 @@ -12564 +12564 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 2738cf9e6ecbae9176cc32fac9a94552bb2914dd0c13a97e26ae328f477f9cb1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 04d797aeb70c423af627da94f49648a2b1d790b68c10a021dca4a3e5f3c9169a 2 @@ -12567 +12567 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 8452efddc925ab3946037bd9c3693fd50ad95bedfa09fa4f7726ca450ad6b4bf 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html f4ded53dbce721fed81e9362151a63457a68cd25c703cac0b5acb01200736fee 2 @@ -12570 +12570 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 33cbbd259861d1805844792c37b565e3029382dc9e6f0db9e3da8b75d4c98fee 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 0513017b5ab036c083dd4e7cb3d0dbfc80ee3935beafb126913bde634207cc44 2 @@ -12638 +12638 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html a338a16f7cc03c0babbde4394e0d4028b3d98d630be1861068dcdf8d61f92fc6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 0faad2cb16440ee0c530a89c2d4f9285b47c2df26b6018879e025ddc55f744b9 2 @@ -12657 +12657 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 1dc9a151964217b7ca319d37ba32d0231540ca413153041ad43422917e55abb7 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 5583b3dab19f516956204794c4be1347c4c3b3eb52789c13c4d7264f65fa232f 2 @@ -12706 +12706 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 52e910923a991a6fbdc33c6cd57ad5ebbd906cbf2b84aaf863e84c026dba455f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 677e61d18580714ad2b5eb25c5ec52d16a5a57bb56eb6fa87770aefad5338368 2 @@ -12709 +12709 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 815a883fa74ef09f3ae9bf94d275201ec62956ec264c1eea4c84ee6108446a73 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 19d740b1df4ab0d62b67e1311722526ad1c0bc1440d7cdb5cf6796555aa49b4d 2 @@ -12721 +12721 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classVector.html 12b67619b1c7be3446ac9899f2a285cab41f9545555343420211f8ed2df2fab8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classVector.html f09a46334c72c58f45e28ebafb9313601ba3f727ed0f912dba7cc47826d8623e 2 @@ -12790 +12790 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classhp_1_1FECollection.html 
e12083f4d49f7c5b71a3fe86f4c4e24d7a51258e7086c30ee680d715dcda9337 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classhp_1_1FECollection.html 93f162f331fee4aca2438e7c2db015f6f5fc65b74cbee7f2d1205ec2bc5897bc 2 @@ -12793 +12793 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classhp_1_1FEFaceValues.html ee224cbc3631cee02b37fddbf01ca02247df5cf74eb25205564ac9f0660a8353 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classhp_1_1FEFaceValues.html 92c13b6f85dac88c8629cca8b0b1dc03107ae7c244167afa07f0403d6afba448 2 @@ -12796 +12796 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classhp_1_1FESubfaceValues.html 318f57cb883c33051b4d3d17119759ff5e873be41372ffa7110f5fe4e5adca62 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classhp_1_1FESubfaceValues.html 759d2e2ac1bcc206547b2200c65bd6646f75dcee9ff6bf3590bac95f1e48152f 2 @@ -12799 +12799 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classhp_1_1FEValues.html a774f7fa717c9cc4449c6bfdfb3f32cfa05e1ebd799acfd77c3074fcea2d7c91 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classhp_1_1FEValues.html 8be4f331b94359cb65a15d319eba633e47c37908ed82fa6e6d375cebc58bbf66 2 @@ -12801 +12801 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classhp_1_1FEValuesBase.html 1a39ceebeec383020e619f8a24e31ac3cad9f0277759186241849353b53dfdeb 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classhp_1_1FEValuesBase.html 7156f8b73d44119c7d6d39d7da91b35fc762f2a9f26b0d26aaef84b646fb43f1 2 @@ -12912 +12912 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 3c6eff67fadf39b07c91b779aa5535f5ada55a6e92d1225b5e0aa224b4b9e8e5 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 61ef12fefbc42c0684a0409085a5b34c1b982cdfadf6c69f2b359cc01b2a87e1 2 @@ -12920 +12920 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classinternal_1_1MatrixFreeFunctions_1_1MappingDataOnTheFly.html 4bfc2496fdb91bbc8ca4595ff85a65c63eb96e0877e1f011295f1caec9343fc7 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classinternal_1_1MatrixFreeFunctions_1_1MappingDataOnTheFly.html e24873cb8d2d77a4deda6fc1d7b5cb00221446181f4bdf2804f76929b2319a44 2 @@ -12985 +12985 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html d71a3e9e9a5f7d97e8d4cd913362d705119ab791bc7c26fa30e37f732782ad71 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 9cdeff4b0f8182f107dce24a0bb36535a9101e4532876b3182f781462c51f4d5 2 @@ -12990 +12990 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classparallel_1_1TriangulationBase.html e796fd10ba976fdd18d46cc872658a7146c52356e9958b98e02c6839d95decb8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classparallel_1_1TriangulationBase.html 5b3a9981dd4d950eb00e5b41ef9aa53804a50da8feeff78a504c586a1ec45e7f 2 @@ -13003 +13003 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 40033e71721ab24d86a3d3269f011d24bfd6d02a504dd27d39d469d9abbaa1e6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 17821d708410f8c0d110ce48fe8b64bbd61fd4b546006e1b237bb6f0c767988f 2 @@ -13005 +13005 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 92e5fc75175236f1b63d5d821562568d42c4931528bef1a80a05304bcc700812 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 54eb4029307a0cfbb23a59f0707d3b1d82634a5a2001ece9d66dfd29f2cb936b 2 @@ -13012 +13012 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html b6473dff4ac95b0451c176158d443ac344ac174b679592cfd7abd10bcfdb380d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html fc36a77183134570d3c63f66e273deafa013ff3d7aab45c99b7f3eb2059cc921 2 @@ -13017 +13017 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html ef66b2a094bb70977a24695110061f37210d05ceabf6ce4a0e0b35cc6ffe8b65 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 832578a19a4b6089ae9ed97f3ca83a11b519d07fbf58acc5f567d349377f7c58 2 @@ -13192 +13192 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/deprecated.html 60e21f26fcaaa63001ca64431bf2ebbfb869505f8eba522f926d0fd663797569 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/deprecated.html 99e4c567bd0fe1b5f49485ac6ddb5e8443d8673f4402cd6892bf6c2e2ea71863 2 @@ -13197 +13197 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/derivative__form_8h.html 3c147e1246f4d6f05f1e1bccd5bb1384d3587a94d941c2b44e8755f5025508cf 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/derivative__form_8h.html c9624fa4c850b86a9485a65ac84704ab52f36ff3b63b11949be873c7997f4b0d 2 @@ -14342 +14342 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__CPP11.html 2eec39ea9fc0d49e95160fade9edc1e65616de241b01313a23daa23935efc297 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__CPP11.html 7443a36a8ed0f3cef21fcfedf25534f14a9013c5dc610b479585853a0a7da8cd 2 @@ -14345 +14345 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__Concepts.html f1337f13ac202cedc34f4ee207c68c595e50bce7716152847e9b97bee1bc53be 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__Concepts.html 4f64132370de54c7fdbcab1828988a8a093e30557790e6070fcde0c05e14c081 2 @@ -14347 +14347 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__Exceptions.html c0b4371ab888b0fd2aded819830fe9fc62fb261454bdbc2e829b39a175033e7f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__Exceptions.html 6a37c409539c050ed1ef251d1ad5054e402fd6921c88a37aa501e75b04504596 2 @@ -14359 +14359 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__LAOperators.html 82960282b349e9465ee6bc7423fec078d76eaa9e42dfd51d70b108f1798dbbdb 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__LAOperators.html 45c666f8a27f554b838a020375d13e31a63e02d6dd7fe971d1690697be4076c9 2 @@ -14387 +14387 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__UpdateFlags.html 532dcba9810d7be65654067976236040c82793eb50db11aef88bf831519f4fed 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__UpdateFlags.html 0f1336d352cb4056291e7aa299c5661274ba088f8bba05bb98b30a30b33de21b 2 @@ -14392 +14392 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__auto__symb__diff.html 879bc0eeccb12b3f0cb29545f3c1cc318f0bbb813377eb27272458f814628200 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__auto__symb__diff.html 9dfbae349d23ddd82100576aa2518a0685f11e3b4a66dbc3084bd69c8d25d269 2 @@ -14394 +14394 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__constraints.html bccca9568e186566399080a60aecb8a8997e94d1eb1a8d6a802ddec487789ed2 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__constraints.html ae08c58d9447df2f39ee0e1e5f8e7de4c9b32770d2444e5a3e745ca33e199e54 2 @@ -14398 +14398 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__distributed.html ee9052e26a9cd2f0cbc6a29c993cd6f99e7f7c32bf22da0da95842b4010c1077 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__distributed.html 1b0998086bf2217fdec485216bca7fb142fdb8f74aad44392b9df82e53be35d6 2 @@ -14418 +14418 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__hpcollection.html 
a913d7b73e858785c2be087eb0c8990c3c3ca8b1ef7a088772fa39e86e28ba2f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__hpcollection.html 3cca710732ca36f40772d1655a2b1b4f61caadae81a317f16f1a9585700cdb9a 2 @@ -14422 +14422 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__manifold.html 2ccb93866c8320120a623eeab17fc7cefef20a3272b7f87a31e3c57a3339d425 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__manifold.html 5d52d47bad3f1e9109735a6e2c3500c9c3d43623b24fbf66ca9abd77b784977b 2 @@ -14424,2 +14424,2 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__mapping.html 60b20d45af1549ff70e116ab8fb3b0ae962d09cb32bac85773e8fee8d64732a0 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__matrixfree.html 8cc9102456702a04cbaebfb42cd6bf2f3a348fedfc0ea76658739acbc8e3cc14 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__mapping.html e91a37369710c19f079643c471b815478cfa2f4e4145f164d42733b679bbcc50 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__matrixfree.html 6235c7e81d8b2e2348e0c0c13cf980d9c596a1623432952931fd82bc1c96e526 2 @@ -14435 +14435 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__reordering.html c5ca5efadf35712a0d830c28c101cfb9ced96e39b6910c40cc29a48bfa0d1783 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__reordering.html e919be7468b46cf592f8d80d551074ce1868fc752dd56504e36743654ae29d21 2 @@ -14440 +14440 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__threads.html fd8c87f0c4752c170e4829a6b7857fb2a86eece9d73f1697c099e8516e7a51d0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__threads.html f3009e522330ea6e4b0decdcf797f44386c1f78d93ba41599860c3a727d0b5f7 2 @@ -14444 +14444 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__vector__valued.html c12f84f192b08a115425cbd51378eac5e261a2f4be7f51f43b144eebfca1ba0a 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__vector__valued.html 7018adc6ad5238298644dab06958da3b4d73a00da024d5dbfdf8fde26974d45e 2 @@ -14537 +14537 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/index.html 640cfb8c69f9b44643bc62cbe74d92e1afe1f7c81521927c38fded4089287082 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/index.html f455aa19a246b6548fd2596912d8a8e15aea8ac8ebc44be1f4a22324b8bf2818 2 @@ -14540 +14540 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/index__set_8h.html 86541f02a374ed3313e6e1245a1bdb2e9f9fd1754bbbd20745d6e930268104f9 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/index__set_8h.html 7f5026ac945042ebc0576319f17d38cac68682297ef1620d1219739d33455da5 2 @@ -14874,2 +14874,2 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 42131ce71ab760ba6fa0cd5cc3cebb2ee61ae16511914001aeec30177e6d020f 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 4f8942ed4aa4f659100126739d6c4875d4266cc09fbf8896f427432cc7b75cde 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html d005ad98eecd62fb740fe0eb3fa762d9423992cdc6dd8491e2be3c4e4d7773b0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 82b75346969b480692d09d313567ebfbfa4a06381d6c2724f8ef6fbb431a2567 2 @@ -14890,2 +14890,2 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDataComponentInterpretation.html da50f6c7e81af77237adda64dbabe7289434138ba437172f9d9278215bb64cba 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDataOutBase.html 51481dae02b3d1a8e972ab95ff064ae0e970cac18dc91061476da455b85624a1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDataComponentInterpretation.html 6cd40085419662e8bc5a683bb532c1e9a8079a731ca6e2354b14742738650f67 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDataOutBase.html c1f5c7eb55769018150787e530cd9439de7a1c5a2e3c0798f7e2716f8eefae9e 2 @@ -14895 +14895 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDerivativeApproximation.html 6f81e3d0a25dcce34c7007c21750ab97c07257fb6457e71e81e498442d268a3d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDerivativeApproximation.html ce0008995d8824ebf85ac8c992c75e1e3d2874fd9fb5d9701205c9671752f9dd 2 @@ -14901 +14901 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDifferentiation_1_1SD.html d922af5ee6e226f4971ac6340b7e3eb312592225799a472aaa6ea28589db52a8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 6c32a4787f5b7524c7231965875012a19972ea8ed366db9c1dbedaa7bf4d70d8 2 @@ -14906 +14906 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDoFRenumbering.html f05de1aac38dffbc6d096f878f1c418d51888c8567b438f483ca925f55a3dee6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDoFRenumbering.html e43241079147cc0abdcc7e4984134f1248f332457c245712c3723a055a542d77 2 @@ -14911 +14911 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDoFTools.html 4caa185a6c6f10ee2d7d6c53adcc951abab6c2bead7f4cde527f2282d7b03065 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDoFTools.html 023673ea6941ca3d732bd7ea55968c5c77e29671951ada0c94f782c8ec4978a8 2 @@ -14919,2 +14919,2 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFESeries.html 410199cd6dabb61722f59febd6365ae7c77495980ec5e21f404c30f4b9e55a37 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFETools.html 3565f6330cc9085faf899ea77700f65a8045ca236f826249d67b17c372b26ee6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFESeries.html 491aa750f117d235ff1adddddaec526c71d487f3466e59fb1a18ad0af3fc1445 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFETools.html 8d85bfbaa2e91feb302ea1b968b85e333017cecc90e4165a02c1eb21540ca0e2 2 @@ -14927 +14927 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFunctionTools.html 46a714638b5112c9661efe897d0f8bec9045f7ea8acc16977152de2ea25fd2ac 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFunctionTools.html 8ee67ad1a3f13564b904aefa921789bc1c7161f29f02ea75eafc2dc4cce417d3 2 @@ -14932 +14932 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGeometricUtilities_1_1Coordinates.html f50f83b4e5374a6ecfa9493720567d1f980ab6a31796409e3d6f17f67c7541d4 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGeometricUtilities_1_1Coordinates.html 024634e74bcad04209bfd699af17dbe5e9fc444962f25074c49557f262f87e71 2 @@ -14935 +14935 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGraphColoring.html 3ba6dd64978faf64abb8f1bb3caa76c609f36c4b8d0b0d8489c83ed177cb495f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGraphColoring.html 198473783154190ec39485d3c8dc90669e56bd57eb213b2a781fe23692f12692 2 @@ -14937 +14937 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridGenerator.html 5e852e6cb501ad72c699780d7955b69f028c678753053fe32b5c6e33ffb6d64a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridGenerator.html 3ae46252edb3ef30e3ab0369537aed06e0c43162d38d8c2a434da06444321c77 2 @@ -14940,2 +14940,2 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridRefinement.html f2562519bb94835d3588e4e995f44dda624c0abbf7f78cd1e76aee2e95c415ee 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridTools.html 4e77770bc682b95dc77c261e65b30604333d13ddaf729246f4baa02d68391532 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridRefinement.html c70bfd76999a2ea33ca296d170a849ae2a688efce4b0ce09d1f08817eab2fede 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridTools.html 76774cfc2445346bfe4f3aee078521d62ad7f159c8f1ae70254ac4e1b20cd89c 2 @@ -14964,8 +14964,8 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators.html 19382fce31488856bd2fc05babcb10658a15496435d983fc276bf86f5a9aa00f 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 247d7801b898210db3e39c8adc7a09aad10babaeb0d464d3910cf28abc8e9c7a 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html ac273bd3fb8db81bcebd4242473032442e6dce278bde3fc138dbf298bb8e1be1 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html cffb0a1d91c487f78b2c7ef5c67e3e56904b0ba6b6fed225127fdad44fcd12d7 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 5469fd0878042911e428426cdc7dd1eaa5a58caaca114476bfe3be25b20e7c0a 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 3d64f42e4225ed05a5125c8d54659a2d7c3397c862e6d0cd34eebcf94396c70e 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html b31608790289004712f563398bdb8b5e2d8afa3015cbaa8092aa5eb5982788e6 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html f79fafe243c1b69625e9b516f66d1a8c1928686ffd33fbb6f1877adef090dde7 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators.html 407abe35e88d7500d6e285371c805984ef2e8fb62d7b2ccd09908bbb6b439f40 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 4f7cb7148e4fde0b560b6ff75910c7f4cb324d5269aad709dd6267561c30afce 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 39f2d840493c16c84721dca50b3c7b464fa6f7ff7cd9bd47ae38583e639358ff 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html fc164ca780e3926c511b0f871c8868e24fe18bfc65338bca7cd17b4015efa743 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html a81fa818343bd3dab379eeea88f5bdd8cc61ad57948453330b527711b97c85f4 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html d3f80c8fadab799be8d06d2a476af5c453a2038186daff5fb73dff4d4410b1ac 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 0fbd69551f2bf0b29e7b933a0fc9348c0eb5db3c9240a6eb89123da744ac5dfa 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 306b9e910d5813d27feecd71e57c3935a4baaaa7c94ec2ac54f1149debc3ff54 2 @@ -14977 +14977 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceMatrixCreator.html dc8158e6fd4551eab108d2420cb97a8116216d549d612ce1638c7b44b267cc57 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceMatrixCreator.html 301086f46806e9150682e4aa3f4e404cdfda3f17484c859240dd278f0884bcb7 2 @@ -14991 +14991 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceNonMatching.html 296443d8a99f66aa228f6d47105f56f7efc2f69ac9330381eadb46657aa23080 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceNonMatching.html a9629c01b8523d65702c388d520c50d869e81cb0ed9f14836fba43a68764a81c 2 @@ -14995,2 +14995,2 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html eedd7d1abfbe8848c9aceed821bd4b546f4f868b3572e5ffaa0ef808aca9c726 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceOpenCASCADE.html 4d56787e509a7bc16cceec5ac0185fb18729014d8eec0857b89672af188bfa98 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 
42f94da0246ddfd45a7b1f36158e458a2a325d6c116206b810361b54e8c28c2f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceOpenCASCADE.html af1200fada2225622b94efeddb04461d3de0c729113849c91d9d61a9cd6aa3d4 2 @@ -15004 +15004 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceParticles_1_1Utilities.html 9a89bed574d00d677b2cc8c84b2fd30349c09c484f68f60defd2815e5f7519a0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceParticles_1_1Utilities.html 9a03198764d7f3623d447f7b47e009f28f5cb17f7f8a573ebab2da6b4b90a0fe 2 @@ -15012 +15012 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Elasticity_1_1Kinematics.html c4106160dc3dacd6b9211e8d008ff811c338e71ff2e7295e6ac8b89faaa76b46 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Elasticity_1_1Kinematics.html 8d714582a04702923bd03f2132c85f65718b2f2f91db71e2de2425a6f23d053f 2 @@ -15014,7 +15014,7 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 6403f8b0810b8db655b190bb14b4fa7ab457887a2c111b01890eab7855e65a3e 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations.html 4dfc6d0de2dcd5116f8592f367362bdf676054fb6deb28e608425a64c1194468 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 58f0990a4322595830687de173febb9f1edd4f380b50f4a7c49b2db847bad4a0 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 974e8db2431fd8a37a4b04b39decc53e2b7a79fd49f53860ada38253edca9d35 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 37cc2b0778cad7cb75e750863c010c7b3be91a3654a8dfaed1538ed8592b154f 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html f892561e14102286a28514ff422b38d52c106c287635e6ec4cfdedbe98aff425 2 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html bd6a32db082a3effd2e34f16166ed9b46ec9042ba282d02cd3fd83c40d6e674d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 14940913c77ea978b512b3c4047fc0fba741256b3f22a009639e4e1e0bc67d1e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations.html ddd9a3f852056ec2555f6fe3d78f9e2ced0c90a554b2fd8d99882cbf4cacc7fc 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html cdeaa962e4c9ce888a1511609db759fc32ffc7edfec59c141df25bf2c7d9040c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 296d2fc06d1272e26ba93a08703c6db21dbf79e4142f13397321ddf2d4e0afcf 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html c985e9afcb3505e942c6e1c600406bea53a21d78b11204b448ff139cad87bb9e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html 529bf19c4b52578f8cc1c637bf643998ee87bc1957019342e3f02ae1c45f38aa 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 5003731e875ea30075f4b9d0c83570bc3999f769306af713e1f28efe3fe9adde 2 @@ -15028,2 +15028,2 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSLEPcWrappers.html 40b19b38d0d8b4f8066f4f63a4149b51cd4d4a8b3f8e745b6d0de43312c207cf 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSUNDIALS.html d1bd260c68ee781af16f8933c584b4e26ca1c48506527664b8c4932d63a121b4 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSLEPcWrappers.html a3f4522f7447e2cef49863e9dc5bd1a403bf029f50bb19116e68650eb633d8c3 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSUNDIALS.html 97cabfd440707a6ad818dcdc031f8ed796db03dd9c2045c7b00df75d6ab5604d 2 @@ 
-15033,2 +15033,2 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html a3caec8947e4abf8f6c6a2699be7aee33a725e0bab0b5d79504c0d540c06102a 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 3359ae3a6d2fd75ad2469fa1dd55433d765c865ee23209d4d5fb7ed97773e452 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html c4744aeaf1ed543e5d45015a882b10fc3c5f8734049b43125be385bad960fb35 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 332320c772732e0934d4275e44dd8f5b3c6b87bacf078aaedeaebd96ffa85bc4 2 @@ -15036 +15036 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSparseMatrixTools.html 0642a87ed37a10117e7c87b0e3e4d2b06502901e08e38b248f9256f5bc0909ac 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSparseMatrixTools.html bf2a1dadb57dabd0003d38a9bed29039829e0d9ed2d897b46031d2a9fe0ef717 2 @@ -15041 +15041 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceTensorAccessors.html a94c468355d211167f4a7e7f0d128d946ff5d552f531db0d2bab2325bb1913c7 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceTensorAccessors.html fb0930fa3d36ad50caf02cf3382aa3c9676291bfad62f0328b9595520b3318be 2 @@ -15064 +15064 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 02bcec9678686cf249e43a776cde44c6c9e6b00d1920415a0811baa9287ac8bd 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 46b4fe803b6a400f2964a0bbf106dca63b14bc6eeb9b921fe94beb7824536738 2 @@ -15066 +15066 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 50fc8ab54e043e3c0540ae056191dd2aa36f6db5044c104cdd70d5917396b631 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html f51894fb552974a709f7cba19ca77453755e88c392bcd28fe2764e72b01dea7f 2 @@ -15077 +15077 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceVectorTools.html 62290578c2b5066f4ddde6c6028211ccca4e916a4595cc105c096d20139ea20b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceVectorTools.html f693540e7bfc387cbb7bbc2fedf3f59a975024f661c750c263cba01c66e9ceff 2 @@ -15097,2 +15097,2 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacehp_1_1Refinement.html 0018c70247f992e26f1f8c60309adc6edd09bc9a7f2a9d4ccb6c6150505b51f0 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceinternal.html cb412f9f4e4b2ef4a904547a9428aaa7cae69c1009fe41d4a17ac7329f223483 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacehp_1_1Refinement.html 2843a4a99640f260b691f4568406dc17adb4f810afce84065ad41ff05b04cc3c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceinternal.html f45075d10ce406998137d92401d8aac9ddcb905ca6f24d0f8a91684c3087076f 2 @@ -15232 +15232 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceparallel.html e6be85ed9dedf4dec24c9a3c60b8d1bc3363db44196c10283151bb640d719bee 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceparallel.html 91076a12b04e52593e0c26272abfdd75fdd984a7d6efddddefbe37a0476072ee 2 @@ -16281,9 +16281,9 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_1.html dc823f3c331f1b9b05bfba9b70029eb4f5e3be725bd187da47b2d040036d286e 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_10.html 112803fe5b161dda68310ff5df81dd61bc48e178a6a52f4d9281838be002c305 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_11.html 3eecf0b4a2008633379f7a2f61ec643ed536791576a36dfa236b8f4b5ac9139f 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_12.html 
33044f093c9dbf86c090147982b53ebeaf6d76eb86725c99e597b673ba8604e7 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_12b.html 0d7c86d34f69ec8e8bb416806248bfa47253ffe1ccbf4fda2a761349b7b70444 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_13.html f0f6d6e4d6eecc24524d63f42fbd86f1eab4ac0daf47f68292ee25330b101271 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_14.html b588a06824fa602e9ae012b55db78c8d67759042106365cfeccbbdcfe1d81190 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_15.html 7e9de10e4e6334d898115d0fd8a7baba480e87548c21db617a597cc26bddd308 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_16.html 640128b9a5a96c9a151f3535e225be40f12171c8bb7388d5b896dc35b502350f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_1.html b0c32f89fae9274119ba7e7bebc11cf4caf15ab09a3af23463620d6db887275c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_10.html 79d9034206dae5b7210fcf3ee5c34f283212c385cbdb6cbb428317fbee018cbb 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_11.html c8379e08120b035738b564e96e85d02c7547d864a4b79cd9fb7d6d578af2ca40 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_12.html 625ff1acfc184fbc7948ff6179137c182bf800ff7faba7ac5031d811275d89bd 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_12b.html c4d091a910453131b8b665e208fc9c1be3437550ab3adcdec15cc71dd69b92b5 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_13.html e81ba4c936913957ae2fbb727eeaec54b5adf1108f4b0b55bea7978e1c4801a0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_14.html 81f77428a7f67297bf7625f7a90638af3ec4eee1c4266cb5ada64975d6075b95 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_15.html ccce36289ccbbcd6b3a75524d74dec14d499b16d21a98d31ab3765823bb4614e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_16.html c522546077a8cb189010d87fea0b3f74ac6eac9772f2ba43185b5ead72207f1d 2 @@ -16292,40 
+16292,40 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_18.html 27edc03dcbff9f706656d2e81a5af17d45bb60493670fff48eb33a2d3717822b 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_19.html f47fffa15e9aaea827fa396e59de0dafb95527133e9925377c29c1e7cfcc9aef 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_2.html f1b91f173fd9136c6dc05173eb0e33837e7a7034e642e02f70a9208f953d1524 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_20.html 0154e6db8cb38c1366045b6c62c68e916ef1dfee25a33e4e4bd1e113402271b5 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_21.html b7a106be3b4926cd2cadbdf03f9eeac017cbdb638b40eb6f9925226aafc6dd68 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_22.html 276ae34dd07f9e13c2bf12f7a3f899ff6b908e96d5dea233adaf221b76e4c31a 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_23.html 1a55938ac6cf9b2586f9f37c8596057d0c1663d0b8fd9af82797971944258fd6 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_24.html 625b91a67b95520af5c99aee2532ac58e40362df55144b9c6864071a49b7972d 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_25.html 494f862dae975ca7c0f273b7fac941131df769c0033691c90dfaaf82e33d5a68 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_26.html a7456b2172b5b9a80c9ffb46894418a635585665d457a72694c85f41a55d9815 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_27.html 1907d2e8f45842f4b5e591693395fe74ba8963956ac6ea12eaa4c6bc42d91c93 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_28.html 313be82a21786a1747b4c97e9ea4ce37c0189b3fe8e3ea51f84ab3ff8ebb392a 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_29.html 508de89d609c7ea9c875be99659bc9cb2e38700bc6a421beff1b641279749a2e 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_3.html 3d7444cda5a2850ffd810881820e63b36b2e1f14f0ca3b0c7f87dec050d71f5e 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_30.html 
7e9f2e3d116b3d4fa7ace48a3b8c39c1e83ba81941e3b88a8ba1dd11955337ee 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_31.html 61eb8a914586d8ede1fc18b6c4535fb2dec6122f8b6d85af7cc6fe240a654330 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_32.html f720ef3da9d7aeee24015db59b2d570e0410aeac9d6618e7a90c0031c4f185c2 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_33.html 9a50167a941fb6fe2af2d503e7bc80a181fb8e2a4fd79473bb182d0fdcdb4bf7 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_34.html 3b31ccbd6a0ea7771a0b09f931f5dacbced8b3df74a749c94fcb03321ed753d7 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_35.html 88b745c46295e0de133f2261431bea57de28355620f0ab4629d6f7f015f60727 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_36.html d983d9975a11e6376607a19edd5f42ea59c428a59692b0ef0c940e37f6742f21 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_37.html a31102217c358ec3c466aca5a946da9b7dc3f7452f8077bb8a5675fb81b011dc 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_38.html 6366d78286a58a8ab5d079bd7258f4223fffea9100dbf5995a474ecc4cbb4b7d 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_39.html 7212b8797fe92c5ce132a5f6a9b2f2dee7a032b8f7dc60af2e4b24db62e585d4 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_4.html fd8ba5d751505cd10543aa437adf09dcb4c85a4bdff7e6173c1943eedb5f1f50 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_40.html 69e9fd10bc1d7757574620b19db80a47442249176b91adcc3c47a75ad89b3d1a 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_41.html b9cbc8161d6b8f8377971ba8581f8b5474760196c9874df4b6f51c6ff62c4c16 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_42.html 339ca0ca1174dda1d7251d296037ba13c2a57ab2a68129f6c0448534bfc77ab9 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_43.html b5f8fee8e0ea1f4c17f4d7afc1f55805de1ebb3dbc387a2e658214046f58cd5a 2 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_44.html 6bbfaa421acf49ba13fd43530ee20ce08d3509827824681d8a6838c8bdb2c55b 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_45.html f6f115bbedfb24eecf3115c8147b153830140ac5a6795b9508c24070d77acc8f 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_46.html c9045f680fb885f7824b7cb0dbee4cf3e004c6b773c167819d7a5bbb46a8b2d3 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_47.html 0a64e669be63d5b0a4e607e6df6e1ef5fa650f8f68c85847437b9bc7302a0056 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_48.html 31681aa2c8a2b8b6f765f45ce54a38959ff08ab1caaaf9870813f859e486a9b9 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_49.html 80280b755e3494188e9fc135b28c476fbf16ee26a47c9d18f013c7ff7dd81cf7 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_5.html 55375219df1ce72dc7484b576ebd611e8d22217f1e446f44df50e9123f73315c 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_50.html d2b17972ad0be316738d757d82ea85e2c167d7afe5ec04e3d5b44768bd990db5 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_51.html 086e7bc24714b2bbd8cce6028a4450ae456f203f20af2180dcf538ec49a74d9d 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_52.html c1c8b1e079f0d63a03b851725d18ca9932c2d211db8272f6db8a15a4c4f0bb57 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_53.html 0790397acc9c105ffe059cfc58e5858c320b0a5ce93bbd1d2d453598ec25651b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_18.html b7a52008633e96c2bb05096588e331a14345c10090cea4b25d9c8fcd09da42f6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_19.html 795a35799cf9e69a0c9e69725e96c60db752341154775253db0af896b7c1f6b7 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_2.html a2e57e4d4703f2a235a8567684ab09c44cb61de62b52dc21c8f9472d5e27d935 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_20.html 
8eee53c570d085208c041f65840efe30ebf7047d4127074045e9c68c00325385 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_21.html 3b672e392ddd0994404917726e93f61a38ff98bb7e7962cfecbbae565d90e5bb 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_22.html 797c31264e62bc6bb77729ee3795fc668ac943715015ac14772bb48bceabd599 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_23.html 0ebb8c2dcfc038032370ca02b0f62992b31d650067bab542f0005dd00512a6d9 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_24.html e492e9449dd08aa0210a837f1c982558eb64830ca6bc8c81324c7073aea8af64 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_25.html 9b269d8479ce8f017a3df4c51dbfbdeb28ddd2e770d2e81752ff885786d1b967 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_26.html eceaa6285e0bae3e200fd88e13fcee29905f2674d8f6eb9ebedf1e17c2cb9e6d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_27.html 97419d2a8f84b04f84ffc19228dee21ff881fb03ae3a4593ff0b9130729c6de5 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_28.html 0aaeb230a203092440a8274c807a856e2be382ab4789753b5bd2cb75c6e14f48 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_29.html a92cd6235058f6168ee7f1b7fe10e88f994a5b92298ae11eb68426e4c7cafedc 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_3.html 1097131f5c64c5f5e0050a6502f6fffbe05aa09f97f2854958484671dad2157c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_30.html 30afda04bb85b4842b0feb96cb5a9bc629468afeeabdc8b4cf0532cf87d23f32 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_31.html 28221ace62ec717c91cb84ec080aec433a1d8b0af7b3fe4474219400ea30b55d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_32.html 4576418b41fdbcb6a7cacdd08aaf65e14cafb620c31516be08cd97dac1de483a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_33.html 725de7882d128b7b11467deecc83362364c2cd15f027117dcac7a0bbb11a0790 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_34.html 7d6684db6027e0f53fff4662e2fec41d3e7707c0f1e6e7c8aa7371082ed762f3 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_35.html 0a72c551ca43a8f903f652639f46ed9c6078c5745346dbad7bb8152ae9f163a6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_36.html aef3ee52725b5ca12295bc85c0b030af8477d57930f0e50efd086d1cb8690465 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_37.html f0037b74b5b9015b4ce85cacd6d945f48707ecf60fb36d7a1f5cf018dd590a23 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_38.html f9c424b8ebee2b99c05a58670a01eed2151ca9cb4a386b126a1fa33fe25baddc 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_39.html 2719b409b1982b71c32cb4510500b38d98fe766348056e07d607cdfa09b3e377 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_4.html e018c6cced3a15ccc59c15b323b85bc755460220efef6844ba929d7593b9f450 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_40.html 4b191c03794f6744c2921eb2176d61b61b96a4e9841d33840b1eace7c260e527 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_41.html 7e1bd74453e7c0e4663b76966aa7c56f3f0bbcdb310a5aae7620f644ce24a96f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_42.html 6b45ca9e9635a5016ea455ec25e01b9b2d0741088490e923770f5383ee5c4da2 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_43.html 0d1d32dcdd3a7da145ddd1f3a7dea137142d2d426574645b905e41804f3e212d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_44.html b3cb7263b244754c5790e9ef6358720fb5088b5172fa8f9203c3b104946a85c8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_45.html 509904b7b981668f2a84321a15362fbd4ca4a90fce1c4d792e81b9e1f9e5a480 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_46.html b914a78bae236a47c5b311ad82cc5b1e94158bfe543768bc2c938648ce23ee2d 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_47.html 
e22e8425c422ff980df382266610689458d00c837a7d3de1c1321d0373850448 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_48.html 940c97d8068960cfda5a4c8cbe206b5218a99079de2ba5c15162e7e911e9f2a2 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_49.html d3232625b71299f4fe486bc7e290b4ba8132c2c29bec4b41f02d7e15f9de26f1 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_5.html 041bcd4249856f56a6bb0b320c989ed980fc9ed8751de2565b0d033bc54e9243 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_50.html f46f2a9520a3f608235f44d8cd964dd7ad17ff5d0c16ef6710a01b88d54e26ea 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_51.html 800dd586ef9ce4f9ffc35ac9a4b636498b9abdfc035d0ad4cec884612bd3b522 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_52.html e515a074f2e9ad5c7146059a4d98d6d4a5cf7b601542364a04dabf48befe2b88 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_53.html aa0bf178c6f47100cb3a6a10173b74409f1cdc26c6b8d3901eca3acc99a778ec 2 @@ -16333,31 +16333,31 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_55.html bafb04403ecd069b5adc09fb033af485aa0385c886cc1fce3d82cb1e6ea699f5 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_56.html 763929590edb000d40aa8fb6c114f2fd25cc705442695a17692b2a34da5580ee 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_57.html 58c49b92cc09c98cc42f3c5e5f147e3d2cb35e2a5f8893544332414fcb2b86f4 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_58.html 8326905db30a51e42beb10dd513ea08584a37a955739c3d42efe473c5634587a 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_59.html 0bdf6ef98580709189bee99a27c0b47756752c4307849619d1d15e088313b2e3 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_6.html cd34f21400748bdf698f05f8eba5e11dc74843d6524d9520796d24078ed82639 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_60.html 
ef3b1979251f1a3dcddb5687b7b580c26e49364665ab212a1ee18a9d1168f185 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_61.html d6b869c6f6437b66bec9f6ca28fe095c614088ad7c143c67d687c4591e5abdab 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_62.html 45d1ac75aecc8e33cd51af5d0f38a1d909a0bfbcef74099139bdebb357532d92 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_63.html e23d2aad77237f1a399ec7d73bb20769aa08beff5570ad3e3412b503ba84c5ce 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_64.html e2947f8dacf18e1f7367112db4471d1a97b5a3352a8425f54369446b54bfb4ac 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_65.html 5e5cc75de7203f5c998bf84ccb27f8e6844ac57468e95819c6aa7eed505ee222 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_66.html 8f1641652497ebfb7540bfeed7f1f4a2a313561665f29ba7f8bbd2d6d7781b06 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_67.html 1b08291afe51c5f2f9144880543bfa14938035682a7cceb41ef28e4cbf5fe669 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_68.html effff79935b0558cb1e50082b84ecb6f54a55b520826dce32c26fdd6acce7adc 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_69.html ad1bfcf199d02f806cc10eac0c441a6591cb197fa0297b3463dc70b6d844eb9a 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_7.html 6f003f0f240f04e2d9b148a4f7a9ed3f8b4d7a96f0916c0b10d69413b67b6adc 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_70.html b6f0091dcb5b76fc53e12f95d5a87033f687bab75afefde638ade94d061915d4 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_71.html 96b85391ea462d00a4b8db5cf06be440fd5fa2639d5ad1078afa91703b2cada7 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_72.html 00b05bde5009b6e4a0301c357bf2ee0793eaf11997ff2a49b8d50a18b46c7672 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_74.html 3a25412ed9b21d0ad3ae4563f943f532db1169de66e9e08dff347b7f829ef812 2 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_75.html 6240ec654290666a5faabfe952f322fc1ea989f01df05a253743b666b3623fa4 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_76.html 8c428964cd340b3a64847f48c489ee792b5e46a68b48d792b5b69f7ca629bc0a 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_77.html 755217bae65fc2f0a7872fe6b0a9c0d8c30a1fdce36aa791c3c3b3c3ea7afee2 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_78.html 7713f27a3d22cc55df6130adaf81cbdade14fa502dd93dab7540b4033ffe39fc 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_79.html 75a9c719aa7e2339c242bdd5afee73682b4ec0cc42274d25c2a2018e69352574 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_8.html 3379f5215d9ad62d8032a968f64364ca5b4c2e0dfac52a111c89e4a8eeb95958 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_81.html 3bf00236d3f212ff3be6d98359111a4921caac8dcc07da462c4b63680e98fa39 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_82.html 0baa14e66668f801fd80944300c2a0f4c07b39744f64f8bd073064f4a6df8975 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_85.html 120ca4ccf0f6c531eac51fb29ec8688f3dc0bdf75070fff96e3009c645995ce4 2 -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_9.html 7f9ef66d818a008eecfb044ec1af69a843e1859218fdb68cb69a7e4561e392da 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_55.html f03b958c85560d6fbfb77474e477bcb5410325789b39325028d4bd36f4c95626 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_56.html c7d8e2127fc46bf88b10c6f1627907c83cac5159ea5e1efed523b9d1face1169 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_57.html 5b0b3e8de9be45a8db7b8fdc633cddb88a4b1ca917e72fc8834ed526008dba71 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_58.html 27576f7969efe114c2c67da16568ce5b1f0026a147078101a65934b7d10ac66c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_59.html 
8297039787f554d2d0188b1a235979ff029f93329d393cf22847db763536cb3b 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_6.html bdb5a667c66d3c7db889d3e7dba841b3e9249dc3ac9953bfb2eaec93c537e7cf 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_60.html d70c334a118c74c0af3e44e4d7df58a9da3b456db1586922cdfdf16c3b99ff96 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_61.html fd94802adfe821bd37df53b9c2e02603aec80ee7d12f2eb4611317c3b7febcd8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_62.html cf5d49c9c38323b899712a44d1afe5cdb0d3542c47fea77b8758518f4f8dddbb 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_63.html 5d80928bdd0f6f26fa5e28c3fc62bfd78ec00c0529572459b863c06d26a4b297 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_64.html 395c3ab291f56f99aba97480a5b9743a485c78ece18c6cbd36b33352d808bcfa 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_65.html 1f9a334aa99df52499b5606ecaa30db01f315011b08f073725d8ccf6144958df 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_66.html 10f75d1792f3baea327877731b62ffb22351d0cb0242e3ebb4c7161290685b24 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_67.html 52401dc6f8f3d4b7b70f38f405ce43d1bd68f6719f3c260aa1a778e5b6e66ee6 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_68.html d709c10caf2ba54e9442447804cca131efb60179c70016bb0b1844242c43b732 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_69.html 707ee951d7854400c2c4c4d9098454280562db404e0aaba7d5b375c9d1c5d193 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_7.html e4f91c406c69b354f120960947513e693aece28c67e805f4d33f4e34a9871def 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_70.html 3e6f2a94a1d33e9c0b98c032b7f9d918064d2dcc271e1f7f5cf40c0717bf62d0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_71.html 3ab396fd811a6c3a8eff8f8bd0250d104f0746cb39805aebbbb71dbaf1399821 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_72.html 30b367577b275059519335bc583fd7d8fc39fd4775773941357933a171e60aae 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_74.html a0d0ca04f522421bb572d53f3b6c264dd5a2725c396336feeefc218a2e2f9567 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_75.html 95d08172fba72bd2a63a8faeef592f768ff93994a4412a2367770104f753f933 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_76.html a7d8995cb3578ea1a8c3774d9248a85d755d2faf93899981148032b09a6bab9a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_77.html 09ea852ffd74c63258efba7bd0eb1be3af5ee08f0aa8a7970b483210dd3c2f8c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_78.html ce8b1e8d44cdc655d469fdf1f8733bf9c5f0bfc776f715829759cc1b0b1b446e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_79.html 309f6a594dee25270b438460f304fee41eec3485f73881af6527adf6a94dbdbf 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_8.html 8d9f6981159a0a322fd5b703829377e1981620033c9e8f3742ab591640ee2a83 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_81.html 98951e0d4f27064ff4bf05d512972524c248907349a1454691ae99cd2589af48 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_82.html 0022ac510797b4c004173a5f0e1b9b7a1a12fb832fed18da9e51a84bb04e8630 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_85.html 28f9ceb75079c72e33adfd9b0a0f9fd95e7100045b9d9d3fc0ee13911b75e8b9 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_9.html 0fd743ad70e0253eaef13054ff22821137c128a4682b3211a4e802b0e2f411e9 2 @@ -16417 +16417 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structCellData.html 01448bb2dab4eec8d6397e8fcddb4a05cc0730a34aebe22f2eb392b333647393 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structCellData.html 591817c3a0d60f0c69d1047f5945acac93a0c72b4c457ad8927e07f1fdab2df3 2 @@ -16422 +16422 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structColorEnriched_1_1Helper.html 4aae0d64956a34cc464a13798909dd81843c4be0ea3c0793da096bcfdb6844d0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structColorEnriched_1_1Helper.html 8e0bf8e9ad3eb304cd2e314690aa6109f7e204b165909d0f5f95764739e5f012 2 @@ -16473 +16473 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html de5b68aacd06b900ee494e42469eb3ac9cb9add0250b09f4199bc5e5c8249215 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html b5020a0ed1f1f5acb5373c344f82ddea36fb065189017bf4880946adb1155cc6 2 @@ -16479 +16479 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html 1ebe750809cba5a6a52b4d642f10d925614e885ec00f2275edfa6e26be494619 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html bbbd9e449f5ed0d78b8f604391f41e5306b431fcf22ae158ef27cfffe814df56 2 @@ -16633 +16633 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structFEValuesViews_1_1Scalar_1_1OutputType.html c1a4c26a34ce26aed5715602628bb105a14dfc157e5de6247607efb5fbf414db 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structFEValuesViews_1_1Scalar_1_1OutputType.html fc9c299df554319dd995c84251e04aab3578512a5958bb37585802f261e1e87a 2 @@ -16637 +16637 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4_1_1OutputType.html fe5a6edc900130b45850a951af55fab20c0a17ac6eb3f6c51c8f1406d6573617 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4_1_1OutputType.html 7d001cfbc5d2fc68c7309360d910e563516adc64f20c2c98cabf44fffdd438fd 2 @@ -16641 +16641 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4_1_1OutputType.html 33e5680041ee9bd456a71bb5c85235d0c177171e3b270243541bbc5ba91a8f8a 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4_1_1OutputType.html 26d6107fc7c6118b654aea1dd8685caeecf16d8ddc739fabc6bb822e193a3127 2 @@ -16645 +16645 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structFEValuesViews_1_1Vector_1_1OutputType.html 152901536d173dea95937845dc7e7ce6aa3be217544046a0304fbb2e33996ac9 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structFEValuesViews_1_1Vector_1_1OutputType.html 372a4f38e035bd0d4a642e9577dfef81e7267f106744093f1f327748da2246d2 2 @@ -16655 +16655 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structGeometryInfo.html 7f3aa1b3b7d93fb5ff850e7848e670be2b16904a85b413f6fbba83404461bb79 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structGeometryInfo.html 568d96137ad3e244879cffffe227ab2654aa4809530bbc3e146d3f07aa5c3e6c 2 @@ -16673 +16673 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structGridOutFlags_1_1EpsFlagsBase.html 62054e315ff56fe08fc63f9c3e45fc0830cfdfc4208fa25cbc59093d74378d60 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structGridOutFlags_1_1EpsFlagsBase.html 2f4aa9e299fdb86c0151700d126c747124753c9b10b58e7e2999471da4779dc8 2 @@ -16676 +16676 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structGridOutFlags_1_1Eps_3_011_01_4.html 19abbf46ceca77b4c959188383e7c7e464b15825fd3b6f4fd2d6c8d6893571a7 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structGridOutFlags_1_1Eps_3_011_01_4.html bf92ffd56a65391cb18d599123379ba3ccb4a337f98e8dd1c262bb0e117c9236 2 @@ -16679 +16679 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structGridOutFlags_1_1Eps_3_012_01_4.html 6847a450134f4d6d910e87e1ae0745168c7dd1ee3dade787700f15beb1ed892f 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structGridOutFlags_1_1Eps_3_012_01_4.html 5718b9f6a2960e7089924e8f1ad065ab7615d330199f93b89e4055e70f7a2f01 2 @@ -16682 +16682 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structGridOutFlags_1_1Eps_3_013_01_4.html d67f064048f7661ecfd267a8fe7f29d8d5f12bad4193ca22f4931a37c11eddb8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structGridOutFlags_1_1Eps_3_013_01_4.html 910e0814aac1f0533ab29197256ec47d5af0cf9d68cdfbcb330f59569c9f3307 2 @@ -16703 +16703 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structGridTools_1_1CellDataTransferBuffer.html fc7625b5d20b418f745cc09ba9f8b1761ad45d43ebd0e477c8ffa164fce7bb1e 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structGridTools_1_1CellDataTransferBuffer.html b1f16174ee9a5923f4e89f942d97c2336a530331e90dbfa01df80fbf59249c29 2 @@ -16776 +16776 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html 665c760f23ce14c027cd40d5ebcf7c7afac04470ef06141070fc546c8b3790e4 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html 214ece7f7f82040e5f4e5cac26aff1a322a1f1069a48c12c825b3510d5d8f4cb 2 @@ -16780 +16780 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html 85364b059cd1a546ebebd5a14c8f01334c5b1b069a0286ca2cbf986983e7a20c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html f23111fa04fc5c35e5cdc082cb637d270eb7e0c59947c330cd74cb0fa164cd5d 2 @@ -16784 +16784 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html b88076d4a1d90ed41e627bdb495da2c2fc4f5f911c7ed35ee70b846aa4fdb525 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html 
9693bb904f2b57489de1b1b00421fe54d987e9d91df2319407973edba9cdbc52 2 @@ -16925 +16925 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structProductType.html 778145fc78cfeb75e932b127311b2a586b3708cf032427e11126d2f510b3325f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structProductType.html 7d44d08f4099e51ebc4103ab73496db5ea59e6ef0530a0d63f208ee49ab4ba77 2 @@ -16956 +16956 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html b5af640c63f2204032eaa6cfc071c49641019099b4d67019a8100387db291e15 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html f1814681429b1a6750aed26a58a88402175f77f27619c5caf3bd815abf9d83a9 2 @@ -16997 +16997 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structStaticMappingQ1.html e2d398b7ed26c542fee0907e561127a0e2c66962c84cd85fa0d8a0293aa5843f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structStaticMappingQ1.html 5247047554a2b227011825b5674fae8a7fa2bc9acbac06f1126f4372be50fd74 2 @@ -17001 +17001 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structSynchronousIterators.html fb02c8b7504085be6fbf9f6d7bbd031cd9b551ce191d362b475a824206278833 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structSynchronousIterators.html 79be7602cc43321123796a775e0300ac66303cdd00b6892183b2dce0b2ae18a6 2 @@ -17078 +17078 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structTriangulation_1_1Signals.html 4986857acc143875472deb8ee6e8d1ceef886007369a5cf156a472e6aa5f69f8 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structTriangulation_1_1Signals.html 75fcec6f7c21827704453443d7434fa5e9402824cf3ae54ab2dccefa01e22218 2 @@ -17101 +17101 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structTrilinosWrappers_1_1PreconditionILU_1_1AdditionalData.html 924a30660d947bbf0f697b1fa31b1600b6ec3e62d892644eb6920b889fe1d912 2 
+/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structTrilinosWrappers_1_1PreconditionILU_1_1AdditionalData.html 54354acedb09b297170ffe7878792ca1e85ea1f11e317d2822d96834c80bf92f 2 @@ -17174 +17174 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structhp_1_1StaticMappingQ1.html f4d165d5ecf49b60ff5b38bb5aa3e0aee5a682d08b8147d7187e663e671fdf78 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structhp_1_1StaticMappingQ1.html e2074d2e99a22aa9869e93a06c639ec532f938cb79699ad8c094f1573bdd50f6 2 @@ -17366 +17366 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1MappingInfoStorage.html 47d623c961ec39830a59a901c39162ea80723804dba7586f618f165ae92859c0 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1MappingInfoStorage.html 06f214b03a2c17431078aa0ecfc78d8552f775904b8ec7180ea591a5c6756279 2 @@ -17377 +17377 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1UnivariateShapeData.html 2ebe52444c6eece17b52e7be3a549afea09f1adb13b2b74e3ded2a1ddee33afd 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1UnivariateShapeData.html b1ca67c4ab89b423ac4dfdafd52cdfcd9bc6fb2a2cf73368a83017919308687d 2 @@ -17905 +17905 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/symmetric__tensor_8h.html e6279057c76dfe8de979cff08a406a3f49b256f170c549b3a5234f03282d0a5c 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/symmetric__tensor_8h.html f54a889b8b786e280225b6d9a12207d21ef7d876b9fd7e6bca59cdead5e71dab 2 @@ -17909 +17909 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/synchronous__iterator_8h.html 6b90ef60bc12c3a0448b1ac45bb0ac184098d734d7797143ab9d0788ca3cbf4f 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/synchronous__iterator_8h.html d4d8bf691727f67d69a2db6e1b7c507f33cca24df7fb515f341a52a7aa6d97ae 2 @@ -17939 +17939 @@ 
-/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/tensor_8h.html a6f6c47bc14547973fa7d137b8bc8f28a82d60dffb1673b7a1cd1689fbb45ae5 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/tensor_8h.html 45b1ee5362f0ca2cd0f0de8640f6d58cf7c00204b6a0464350ad23de37a91a37 2 @@ -18001 +18001 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/todo.html 9d803e6ea59742b4e2d9acb6d38ad78f5f2c1c0ab22e599ee4b1abfd9ce5d3de 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/todo.html 8a49ccd0ee2d938c2f3f963fa44fb436a3c03fb6e1d64a66523f5a0ad6c79915 2 @@ -18220 +18220 @@ -/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.tag d76913c2ac1087f4a88a32ff0bf58b4c7bb536883e267aa5b5aa72a037002fd3 2 +/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.tag 5e710acd15b8b9964a166e5f0b5e17553b9fe867640439d9ab895eec4db51dfb 2 comparing rpmtags comparing RELEASE comparing PROVIDES comparing scripts comparing filelist comparing file checksum creating rename script RPM file checksum differs. Extracting packages /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/DEALGlossary.html differs (JavaScript source, ASCII text, with very long lines (1525)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/DEALGlossary.html 2023-11-25 15:25:50.353417508 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/DEALGlossary.html 2023-11-25 15:25:50.353417508 +0100 @@ -100,8 +100,8 @@

Block (linear algebra)
-

It is often convenient to treat a matrix or vector as a collection of individual blocks. For example, in step-20 (and other tutorial programs), we want to consider the global linear system $Ax=b$ in the form

-\begin{eqnarray*}
+<dd><p class=It is often convenient to treat a matrix or vector as a collection of individual blocks. For example, in step-20 (and other tutorial programs), we want to consider the global linear system $Ax=b$ in the form

+\begin{eqnarray*}
   \left(\begin{array}{cc}
     M & B^T \\ B & 0
   \end{array}\right)
@@ -112,9 +112,9 @@
   \left(\begin{array}{cc}
     F \\ G
   \end{array}\right),
-   \end{eqnarray*} + \end{eqnarray*}" src="form_92.png"/>

-

where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B$ corresponds to the negative divergence operator, and $B^T$ is its transpose and corresponds to the negative gradient.

+

where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B$ corresponds to the negative divergence operator, and $B^T$ is its transpose and corresponds to the negative gradient.

Using such a decomposition into blocks, one can then define preconditioners that are based on the individual operators that are present in a system of equations (for example the Schur complement, in the case of step-20), rather than the entire matrix. In essence, blocks are used to reflect the structure of a PDE system in linear algebra, in particular allowing for modular solvers for problems with multiple solution components. On the other hand, the matrix and right hand side vector can also treated as a unit, which is convenient for example during assembly of the linear system when one may not want to make a distinction between the individual components, or for an outer Krylov space solver that doesn't care about the block structure (e.g. if only the preconditioner needs the block structure).

Splitting matrices and vectors into blocks is supported by the BlockSparseMatrix, BlockVector, and related classes. See the overview of the various linear algebra classes in the Linear algebra classes module. The objects present two interfaces: one that makes the object look like a matrix or vector with global indexing operations, and one that makes the object look like a collection of sub-blocks that can be individually addressed. Depending on context, one may wish to use one or the other interface.

Typically, one defines the sub-structure of a matrix or vector by grouping the degrees of freedom that make up groups of physical quantities (for example all velocities) into individual blocks of the linear system. This is defined in more detail below in the glossary entry on Block (finite element).

@@ -133,7 +133,7 @@
FE_Q<dim>(1), 1);

With the exception of the number of blocks, the two objects are the same for all practical purposes, however.

Global degrees of freedom: While we have defined blocks above in terms of the vector components of a vector-valued solution function (or, equivalently, in terms of the vector-valued finite element space), every shape function of a finite element is part of one block or another. Consequently, we can partition all degrees of freedom defined on a DoFHandler into individual blocks. Since by default the DoFHandler class enumerates degrees of freedom in a more or less random way, you will first want to call the DoFRenumbering::component_wise function to make sure that all degrees of freedom that correspond to a single block are enumerated consecutively.

-

If you do this, you naturally partition matrices and vectors into blocks as well (see block (linear algebra)). In most cases, when you subdivide a matrix or vector into blocks, you do so by creating one block for each block defined by the finite element (i.e. in most practical cases the FESystem object). However, this needs not be so: the DoFRenumbering::component_wise function allows to group several vector components or finite element blocks into the same logical block (see, for example, the step-22 or step-31 tutorial programs, as opposed to step-20). As a consequence, using this feature, we can achieve the same result, i.e. subdividing matrices into $2\times 2$ blocks and vectors into 2 blocks, for the second way of creating a Stokes element outlined above using an extra argument as we would have using the first way of creating the Stokes element with two blocks right away.

+

If you do this, you naturally partition matrices and vectors into blocks as well (see block (linear algebra)). In most cases, when you subdivide a matrix or vector into blocks, you do so by creating one block for each block defined by the finite element (i.e. in most practical cases the FESystem object). However, this needs not be so: the DoFRenumbering::component_wise function allows to group several vector components or finite element blocks into the same logical block (see, for example, the step-22 or step-31 tutorial programs, as opposed to step-20). As a consequence, using this feature, we can achieve the same result, i.e. subdividing matrices into $2\times 2$ blocks and vectors into 2 blocks, for the second way of creating a Stokes element outlined above using an extra argument as we would have using the first way of creating the Stokes element with two blocks right away.

More information on this topic can be found in the documentation of FESystem, the Handling vector valued problems module and the tutorial programs referenced therein.

Selecting blocks: Many functions allow you to restrict their operation to certain vector components or blocks. For example, this is the case for the functions that interpolate boundary values: one may want to only interpolate the boundary values for the velocity block of a finite element field but not the pressure block. The way to do this is by passing a BlockMask argument to such functions, see the block mask entry of this glossary.

@@ -162,14 +162,14 @@
Boundary form

For a dim-dimensional triangulation in dim-dimensional space, the boundary form is a vector defined on faces. It is the vector product of the image of coordinate vectors on the surface of the unit cell. It is a vector normal to the surface, pointing outwards and having the length of the surface element.

-

A more general definition would be that (at least up to the length of this vector) it is exactly that vector that is necessary when considering integration by parts, i.e. equalities of the form $\int_\Omega \text{div} \vec \phi = -\int_{\partial\Omega} \vec n
-   \cdot \vec \phi$. Using this definition then also explains what this vector should be in the case of domains (and corresponding triangulations) of dimension dim that are embedded in a space spacedim: in that case, the boundary form is still a vector defined on the faces of the triangulation; it is orthogonal to all tangent directions of the boundary and within the tangent plane of the domain. Note that this is compatible with case dim==spacedim since there the tangent plane is the entire space ${\mathbb R}^\text{dim}$.

+

A more general definition would be that (at least up to the length of this vector) it is exactly that vector that is necessary when considering integration by parts, i.e. equalities of the form $\int_\Omega \text{div} \vec \phi = -\int_{\partial\Omega} \vec n
+   \cdot \vec \phi$. Using this definition then also explains what this vector should be in the case of domains (and corresponding triangulations) of dimension dim that are embedded in a space spacedim: in that case, the boundary form is still a vector defined on the faces of the triangulation; it is orthogonal to all tangent directions of the boundary and within the tangent plane of the domain. Note that this is compatible with case dim==spacedim since there the tangent plane is the entire space ${\mathbb R}^\text{dim}$.

In either case, the length of the vector equals the determinant of the transformation of reference face to the face of the current cell.

Boundary indicator

In a Triangulation object, every part of the boundary may be associated with a unique number (of type types::boundary_id) that is used to determine what kinds of boundary conditions are to be applied to a particular part of a boundary. The boundary is composed of the faces of the cells and, in 3d, the edges of these faces.

-

By default, all boundary indicators of a mesh are zero, unless you are reading from a mesh file that specifically sets them to something different, or unless you use one of the mesh generation functions in namespace GridGenerator that have a colorize option. A typical piece of code that sets the boundary indicator on part of the boundary to something else would look like this, here setting the boundary indicator to 42 for all faces located at $x=-1$:

for (auto &face : triangulation.active_face_iterators())
+

By default, all boundary indicators of a mesh are zero, unless you are reading from a mesh file that specifically sets them to something different, or unless you use one of the mesh generation functions in namespace GridGenerator that have a colorize option. A typical piece of code that sets the boundary indicator on part of the boundary to something else would look like this, here setting the boundary indicator to 42 for all faces located at $x=-1$:

for (auto &face : triangulation.active_face_iterators())
if (face->at_boundary())
if (face->center()[0] == -1)
face->set_boundary_id (42);
@@ -237,7 +237,7 @@

Component
-

When considering systems of equations in which the solution is not just a single scalar function, we say that we have a vector system with a vector-valued solution. For example, the vector solution in the elasticity equation considered in step-8 is $u=(u_x,u_y,u_z)^T$ consisting of the displacements in each of the three coordinate directions. The solution then has three elements. Similarly, the 3d Stokes equation considered in step-22 has four elements: $u=(v_x,v_y,v_z,p)^T$. We call the elements of the vector-valued solution components in deal.II. To be well-posed, for the solution to have $n$ components, there need to be $n$ partial differential equations to describe them. This concept is discussed in great detail in the Handling vector valued problems module.

+

When considering systems of equations in which the solution is not just a single scalar function, we say that we have a vector system with a vector-valued solution. For example, the vector solution in the elasticity equation considered in step-8 is $u=(u_x,u_y,u_z)^T$ consisting of the displacements in each of the three coordinate directions. The solution then has three elements. Similarly, the 3d Stokes equation considered in step-22 has four elements: $u=(v_x,v_y,v_z,p)^T$. We call the elements of the vector-valued solution components in deal.II. To be well-posed, for the solution to have $n$ components, there need to be $n$ partial differential equations to describe them. This concept is discussed in great detail in the Handling vector valued problems module.

In finite element programs, one frequently wants to address individual elements (components) of this vector-valued solution, or sets of components. For example, we do this extensively in step-8, and a lot of documentation is also provided in the module on Handling vector valued problems. If you are thinking only in terms of the partial differential equation (not in terms of its discretization), then the concept of components is the natural one.

On the other hand, when talking about finite elements and degrees of freedom, components are not always the correct concept because components are not always individually addressable. In particular, this is the case for non-primitive finite elements. Similarly, one may not always want to address individual components but rather sets of components — e.g. all velocity components together, and separate from the pressure in the Stokes system, without further splitting the velocities into their individual components. In either case, the correct concept to think in is that of a block. Since each component, if individually addressable, is also a block, thinking in terms of blocks is most frequently the better strategy.

For a given finite element, the number of components can be queried using the FiniteElementData::n_components() function, and you can find out which vector components are nonzero for a given finite element shape function using FiniteElement::get_nonzero_components(). The values and gradients of individual components of a shape function (if the element is primitive) can be queried using the FiniteElement::shape_value_component() and FiniteElement::shape_grad_component() functions on the reference cell. The FEValues::shape_value_component() and FEValues::shape_grad_component() functions do the same on a real cell. See also the documentation of the FiniteElement and FEValues classes.

@@ -259,7 +259,7 @@

would result in a mask [true, true, false] in 2d. Of course, in 3d, the result would be [true, true, true, false].

Note
Just as one can think of composed elements as being made up of components or blocks, there are component masks (represented by the ComponentMask class) and block masks (represented by the BlockMask class). The FiniteElement class has functions that convert between the two kinds of objects.
-Not all component masks actually make sense. For example, if you have a FE_RaviartThomas object in 2d, then it doesn't make any sense to have a component mask of the form [true, false] because you try to select individual vector components of a finite element where each shape function has both $x$ and $y$ velocities. In essence, while you can of course create such a component mask, there is nothing you can do with it.
+Not all component masks actually make sense. For example, if you have a FE_RaviartThomas object in 2d, then it doesn't make any sense to have a component mask of the form [true, false] because you try to select individual vector components of a finite element where each shape function has both $x$ and $y$ velocities. In essence, while you can of course create such a component mask, there is nothing you can do with it.
Compressing distributed vectors and matrices

For parallel computations, deal.II uses the vector and matrix classes defined in the PETScWrappers and TrilinosWrappers namespaces. When running programs in parallel using MPI, these classes only store a certain number of rows or elements on the current processor, whereas the rest of the vector or matrix is stored on the other processors that belong to our MPI universe. This presents a certain problem when you assemble linear systems: we add elements to the matrix and right hand side vectors that may or may not be stored locally. Sometimes, we may also want to just set an element, not add to it.

@@ -301,9 +301,9 @@

Degree of freedom
-

The term "degree of freedom" (often abbreviated as "DoF") is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problem is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
-   V_h$). In other words, all we say here that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf{x})$ we have used above in the expansion of $u_h(\mathbf
-   x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh. Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher elements one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler. The process of enumerating degrees of freedom is referred to as "distributing DoFs" in deal.II.

+

The term "degree of freedom" (often abbreviated as "DoF") is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problem is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
+   V_h$). In other words, all we say here that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf{x})$ we have used above in the expansion of $u_h(\mathbf
+   x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh. Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher elements one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler. The process of enumerating degrees of freedom is referred to as "distributing DoFs" in deal.II.

Direction flags
@@ -324,7 +324,7 @@
Distorted cells

A distorted cell is a cell for which the mapping from the reference cell to real cell has a Jacobian whose determinant is non-positive somewhere in the cell. Typically, we only check the sign of this determinant at the vertices of the cell. The function GeometryInfo::alternating_form_at_vertices computes these determinants at the vertices.

-

By way of example, if all of the determinants are of roughly equal value and on the order of $h^\text{dim}$ then the cell is well-shaped. For example, a square cell or face has determinants equal to $h^\text{dim}$ whereas a strongly sheared parallelogram has a determinant much smaller. Similarly, a cell with very unequal edge lengths will have widely varying determinants. Conversely, a pinched cell in which the location of two or more vertices is collapsed to a single point has a zero determinant at this location. Finally, an inverted or twisted cell in which the location of two vertices is out of order will have negative determinants.

+

By way of example, if all of the determinants are of roughly equal value and on the order of $h^\text{dim}$ then the cell is well-shaped. For example, a square cell or face has determinants equal to $h^\text{dim}$ whereas a strongly sheared parallelogram has a determinant much smaller. Similarly, a cell with very unequal edge lengths will have widely varying determinants. Conversely, a pinched cell in which the location of two or more vertices is collapsed to a single point has a zero determinant at this location. Finally, an inverted or twisted cell in which the location of two vertices is out of order will have negative determinants.

The following two images show a well-formed, a pinched, and a twisted cell for both 2d and 3d:

@@ -363,19 +363,19 @@

Generalized support points
-

"Generalized support points" are, as the name suggests, a generalization of support points. The latter are used to describe that a finite element simply interpolates values at individual points (the "support points"). If we call these points $\hat{\mathbf{x}}_i$ (where the hat indicates that these points are defined on the reference cell $\hat{K}$), then one typically defines shape functions $\varphi_j(\mathbf{x})$ in such a way that the nodal functionals $\Psi_i[\cdot]$ simply evaluate the function at the support point, i.e., that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)$, and the basis is chosen so that $\Psi_i[\varphi_j]=\delta_{ij}$ where $\delta_{ij}$ is the Kronecker delta function. This leads to the common Lagrange elements.

-

(In the vector valued case, the only other piece of information besides the support points $\hat{\mathbf{x}}_i$ that one needs to provide is the vector component $c(i)$ the $i$th node functional corresponds, so that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)_{c(i)}$.)

-

On the other hand, there are other kinds of elements that are not defined this way. For example, for the lowest order Raviart-Thomas element (see the FE_RaviartThomas class), the node functional evaluates not individual components of a vector-valued finite element function with dim components, but the normal component of this vector: $\Psi_i[\varphi]
+<dd><p class="Generalized support points" are, as the name suggests, a generalization of support points. The latter are used to describe that a finite element simply interpolates values at individual points (the "support points"). If we call these points $\hat{\mathbf{x}}_i$ (where the hat indicates that these points are defined on the reference cell $\hat{K}$), then one typically defines shape functions $\varphi_j(\mathbf{x})$ in such a way that the nodal functionals $\Psi_i[\cdot]$ simply evaluate the function at the support point, i.e., that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)$, and the basis is chosen so that $\Psi_i[\varphi_j]=\delta_{ij}$ where $\delta_{ij}$ is the Kronecker delta function. This leads to the common Lagrange elements.

+

(In the vector valued case, the only other piece of information besides the support points $\hat{\mathbf{x}}_i$ that one needs to provide is the vector component $c(i)$ the $i$th node functional corresponds, so that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)_{c(i)}$.)

+

On the other hand, there are other kinds of elements that are not defined this way. For example, for the lowest order Raviart-Thomas element (see the FE_RaviartThomas class), the node functional evaluates not individual components of a vector-valued finite element function with dim components, but the normal component of this vector: $\Psi_i[\varphi]
     =
     \varphi(\hat{\mathbf{x}}_i) \cdot \mathbf{n}_i
-   $, where the $\mathbf{n}_i$ are the normal vectors to the face of the cell on which $\hat{\mathbf{x}}_i$ is located. In other words, the node functional is a linear combination of the components of $\varphi$ when evaluated at $\hat{\mathbf{x}}_i$. Similar things happen for the BDM, ABF, and Nedelec elements (see the FE_BDM, FE_ABF, FE_Nedelec classes).

-

In these cases, the element does not have support points because it is not purely interpolatory; however, some kind of interpolation is still involved when defining shape functions as the node functionals still require point evaluations at special points $\hat{\mathbf{x}}_i$. In these cases, we call the points generalized support points.

-

Finally, there are elements that still do not fit into this scheme. For example, some hierarchical basis functions (see, for example the FE_Q_Hierarchical element) are defined so that the node functionals are moments of finite element functions, $\Psi_i[\varphi]
+   $, where the $\mathbf{n}_i$ are the normal vectors to the face of the cell on which $\hat{\mathbf{x}}_i$ is located. In other words, the node functional is a linear combination of the components of $\varphi$ when evaluated at $\hat{\mathbf{x}}_i$. Similar things happen for the BDM, ABF, and Nedelec elements (see the FE_BDM, FE_ABF, FE_Nedelec classes).

+

In these cases, the element does not have support points because it is not purely interpolatory; however, some kind of interpolation is still involved when defining shape functions as the node functionals still require point evaluations at special points $\hat{\mathbf{x}}_i$. In these cases, we call the points generalized support points.

+

Finally, there are elements that still do not fit into this scheme. For example, some hierarchical basis functions (see, for example the FE_Q_Hierarchical element) are defined so that the node functionals are moments of finite element functions, $\Psi_i[\varphi]
     =
     \int_{\hat{K}} \varphi(\hat{\mathbf{x}})
     {\hat{x}_1}^{p_1(i)}
     {\hat{x}_2}^{p_2(i)}
-   $ in 2d, and similarly for 3d, where the $p_d(i)$ are the order of the moment described by shape function $i$. Some other elements use moments over edges or faces. In all of these cases, node functionals are not defined through interpolation at all, and these elements then have neither support points, nor generalized support points.

+ $" src="form_124.png"/> in 2d, and similarly for 3d, where the $p_d(i)$ are the order of the moment described by shape function $i$. Some other elements use moments over edges or faces. In all of these cases, node functionals are not defined through interpolation at all, and these elements then have neither support points, nor generalized support points.

geometry paper
@@ -417,7 +417,7 @@
pages = {4/1--4/31}
}

It is available from http://www.math.colostate.edu/~bangerth/publications.html, also see deal.II publications for details.

-

The numerical examples shown in that paper are generated with a slightly modified version of step-27. The main difference to that tutorial program is that various operations in the program were timed for the paper to compare different options and show that $hp$ methods are really not all that expensive.

+

The numerical examples shown in that paper are generated with a slightly modified version of step-27. The main difference to that tutorial program is that various operations in the program were timed for the paper to compare different options and show that $hp$ methods are really not all that expensive.

Interpolation with finite elements
@@ -450,47 +450,47 @@
Lumped mass matrix

The mass matrix is a matrix of the form

-\begin{align*}
+<picture><source srcset=\begin{align*}
        M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
-     \end{align*} + \end{align*}" src="form_126.png"/>

It frequently appears in the solution of time dependent problems where, if one uses an explicit time stepping method, it then leads to the need to solve problems of the form

-\begin{align*}
+<picture><source srcset=\begin{align*}
        MU^n = MU^{n-1} + k_n BU^{n-1},
-     \end{align*} + \end{align*}" src="form_127.png"/>

-

in time step $n$, where $U^n$ is the solution to be computed, $U^{n-1}$ is the known solution from the first time step, and $B$ is a matrix related to the differential operator in the PDE. $k_n$ is the size of the time step. A similar linear system of equations also arises out of the discretization of second-order differential equations.

-

The presence of the matrix $M$ on the left side is a nuisance because, even though we have used an explicit time stepping method, we still have to solve a linear system in each time step. It would be much preferable if the matrix were diagonal. "Lumping" the mass matrix is a strategy to replace $M$ by a matrix $M_\text{diagonal}$ that actually is diagonal, yet does not destroy the accuracy of the resulting solution.

-

Historically, mass lumping was performed by adding the elements of a row together and setting the diagonal entries of $M_\text{diagonal}$ to that sum. This works for $Q_1$ and $P_1$ elements, for example, and can be understood mechanically by replacing the continuous medium we are discretizating by one where the continuous mass distribution is replaced by one where (finite amounts of) mass are located only at the nodes. That is, we are "lumping together" the mass of an element at its vertices, thus giving rise to the name "lumped mass matrix". A more mathematical perspective is to compute the integral above for $M_{ij}$ via special quadrature rules; in particular, we replace the computation of

-\begin{align*}
+<p> in time step <picture><source srcset=$n$, where $U^n$ is the solution to be computed, $U^{n-1}$ is the known solution from the first time step, and $B$ is a matrix related to the differential operator in the PDE. $k_n$ is the size of the time step. A similar linear system of equations also arises out of the discretization of second-order differential equations.

+

The presence of the matrix $M$ on the left side is a nuisance because, even though we have used an explicit time stepping method, we still have to solve a linear system in each time step. It would be much preferable if the matrix were diagonal. "Lumping" the mass matrix is a strategy to replace $M$ by a matrix $M_\text{diagonal}$ that actually is diagonal, yet does not destroy the accuracy of the resulting solution.

+

Historically, mass lumping was performed by adding the elements of a row together and setting the diagonal entries of $M_\text{diagonal}$ to that sum. This works for $Q_1$ and $P_1$ elements, for example, and can be understood mechanically by replacing the continuous medium we are discretizating by one where the continuous mass distribution is replaced by one where (finite amounts of) mass are located only at the nodes. That is, we are "lumping together" the mass of an element at its vertices, thus giving rise to the name "lumped mass matrix". A more mathematical perspective is to compute the integral above for $M_{ij}$ via special quadrature rules; in particular, we replace the computation of

+\begin{align*}
        M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx
               = \sum_K \int_K \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
-     \end{align*} + \end{align*}" src="form_134.png"/>

by quadrature

-\begin{align*}
+<picture><source srcset=\begin{align*}
        (M_{\text{diagonal}})_{ij} = \sum_K \sum_q \varphi_i(\mathbf x_q^K) \varphi_j(\mathbf x_q^K)
        |K| w_q,
-     \end{align*} + \end{align*}" src="form_135.png"/>

where we choose the quadrature points as the nodes at which the shape functions are defined. If we order the quadrature points in the same way as the shape functions, then

-\begin{align*}
+<picture><source srcset=\begin{align*}
        \varphi_i(\mathbf x_q^K) = \delta_{iq},
-     \end{align*} + \end{align*}" src="form_136.png"/>

and consequently

-\begin{align*}
+<picture><source srcset=\begin{align*}
        (M_{\text{diagonal}})_{ij} = \delta_{ij} \sum_{K, \text{supp}\varphi_i \cap K \neq \emptyset} |K| w_i,
-     \end{align*} + \end{align*}" src="form_137.png"/>

-

where the sum extends over those cells on which $\varphi_i$ is nonzero. The so-computed mass matrix is therefore diagonal.

-

Whether or not this particular choice of quadrature formula is sufficient to retain the convergence rate of the discretization is a separate question. For the usual $Q_k$ finite elements (implemented by FE_Q and FE_DGQ), the appropriate quadrature formulas are of QGaussLobatto type. Mass lumping can also be done with FE_SimplexP_Bubbles, for example, if appropriate quadrature rules are chosen.

+

where the sum extends over those cells on which $\varphi_i$ is nonzero. The so-computed mass matrix is therefore diagonal.

+

Whether or not this particular choice of quadrature formula is sufficient to retain the convergence rate of the discretization is a separate question. For the usual $Q_k$ finite elements (implemented by FE_Q and FE_DGQ), the appropriate quadrature formulas are of QGaussLobatto type. Mass lumping can also be done with FE_SimplexP_Bubbles, for example, if appropriate quadrature rules are chosen.

For an example of where lumped mass matrices play a role, see step-69.

Manifold indicator

Every object that makes up a Triangulation (cells, faces, edges, etc.), is associated with a unique number (of type types::manifold_id) that is used to identify which manifold object is responsible to generate new points when the mesh is refined.

-

By default, all manifold indicators of a mesh are set to numbers::flat_manifold_id. A typical piece of code that sets the manifold indicator on a object to something else would look like this, here setting the manifold indicator to 42 for all cells whose center has an $x$ component less than zero:

+

By default, all manifold indicators of a mesh are set to numbers::flat_manifold_id. A typical piece of code that sets the manifold indicator on a object to something else would look like this, here setting the manifold indicator to 42 for all cells whose center has an $x$ component less than zero:

for (auto &cell : triangulation.active_cell_iterators())
if (cell->center()[0] < 0)
cell->set_manifold_id (42);
@@ -501,41 +501,41 @@
Mass matrix

The "mass matrix" is a matrix of the form

-\begin{align*}
+<picture><source srcset=\begin{align*}
/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/Tutorial.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1074))
--- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/Tutorial.html	2023-11-25 15:25:50.373417101 +0100
+++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/Tutorial.html	2023-11-25 15:25:50.373417101 +0100
@@ -338,7 +338,7 @@
 <p class=

-step-47

Solving the fourth-order biharmonic equation using the $C^0$ Interior Penalty (C0IP) method.
+step-47

Solving the fourth-order biharmonic equation using the $C^0$ Interior Penalty (C0IP) method.
Keywords: FEInterfaceValues

/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/_formulas.tex differs (LaTeX 2e document, Unicode text, UTF-8 text) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/_formulas.tex 2023-10-24 02:00:00.000000000 +0200 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/_formulas.tex 2023-10-24 02:00:00.000000000 +0200 @@ -16,15 +16,6 @@ \pagestyle{empty} \begin{document} -$O(\text{dim}^3)$ -\pagebreak - -$u = u - P^{-1} (A u - v)$ -\pagebreak - -$u = u - P^{-T} (A u - v)$ -\pagebreak - $F(u,\nabla u)=0$ \pagebreak @@ -143,6 +134,15 @@ $\dfrac{d f(x, y(x))}{d y}$ \pagebreak +$O(\text{dim}^3)$ +\pagebreak + +$u = u - P^{-1} (A u - v)$ +\pagebreak + +$u = u - P^{-T} (A u - v)$ +\pagebreak + $u|_{\partial\Omega}=g$ \pagebreak @@ -173,9 +173,6 @@ $\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$ \pagebreak -$hp$ -\pagebreak - $x_{12}$ \pagebreak @@ -281,6 +278,9 @@ $x=C\tilde x+k$ \pagebreak +$hp$ +\pagebreak + \[ A^K_{ij} = \int_K \nabla \varphi_i(\bf x) \cdot \nabla \varphi_j(\bf x) \; dx \] @@ -322,116 +322,6 @@ $J_K$ \pagebreak -$Q_2$ -\pagebreak - -$p$ -\pagebreak - -$(A+k\,B)\,C$ -\pagebreak - -$B$ -\pagebreak - -$b-Ax$ -\pagebreak - -$V_h$ -\pagebreak - -$u_h(\mathbf x)= \sum_j U_j \varphi_i(\mathbf x)$ -\pagebreak - -$U_j$ -\pagebreak - -$\|u-u_h\|_{H^1} \le Ch^p \|u\|_{H^{p+1}}$ -\pagebreak - -$(u,v)$ -\pagebreak - -\begin{align*} - \mathbf S(u,v) &= (1-v)\mathbf c_0(u)+v \mathbf c_1(u) + (1-u)\mathbf c_2(v) + u \mathbf c_3(v) \\ - &\quad - \left[(1-u)(1-v) \mathbf x_0 + u(1-v) \mathbf x_1 + (1-u)v \mathbf x_2 + uv \mathbf x_3 \right] - \end{align*} -\pagebreak - -$\bf x_0, \bf x_1, \bf x_2, \bf x_3$ -\pagebreak - -$\bf c_0, \bf c_1, \bf c_2, \bf c_3$ -\pagebreak - -$(0,1)^2$ -\pagebreak - -$(u,v) = (0.5, - 0.5)$ -\pagebreak - -$\mathbf c_0(0.5)$ -\pagebreak - -$\mathbf c_1(0.5)$ -\pagebreak - -$\mathbf c_2(0.5)$ -\pagebreak - -$\mathbf c_3(0.5)$ -\pagebreak - -$\frac{\displaystyle 1}{\displaystyle 2}$ -\pagebreak - 
-$-\frac{\displaystyle 1}{\displaystyle 4}$ -\pagebreak - -$-\frac{\displaystyle - 1}{\displaystyle 4}$ -\pagebreak - -$\frac{\displaystyle - 1}{\displaystyle 2}$ -\pagebreak - -$+\frac{\displaystyle - 1}{\displaystyle 2}$ -\pagebreak - -$\frac{\displaystyle - 1}{\displaystyle 8}$ -\pagebreak - -$\frac{\displaystyle 1}{\displaystyle 8}$ -\pagebreak - -$(u_i,v_i)$ -\pagebreak - -$\mathcal O(h^{k+1})$ -\pagebreak - -$k=10$ -\pagebreak - -$\mathcal O(k)$ -\pagebreak - -$\mathcal O(k^d)$ -\pagebreak - -$\mathcal O(1)$ -\pagebreak - -$(k+1)^{d-1}$ -\pagebreak - -$(k+1)^d$ -\pagebreak - \begin{eqnarray*} \left(\begin{array}{cc} M & B^T \\ B & 0 @@ -452,6 +342,9 @@ $M$ \pagebreak +$B$ +\pagebreak + $B^T$ \pagebreak @@ -480,6 +373,9 @@ $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$ \pagebreak +$U_j$ +\pagebreak + $u_h \in V_h$ \pagebreak @@ -490,6 +386,9 @@ V_h$ \pagebreak +$V_h$ +\pagebreak + $\varphi_j(\mathbf{x})$ \pagebreak /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/_formulas_dark.tex differs (LaTeX 2e document, Unicode text, UTF-8 text) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/_formulas_dark.tex 2023-10-24 02:00:00.000000000 +0200 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/_formulas_dark.tex 2023-10-24 02:00:00.000000000 +0200 @@ -18,15 +18,6 @@ \pagestyle{empty} \begin{document} -$O(\text{dim}^3)$ -\pagebreak - -$u = u - P^{-1} (A u - v)$ -\pagebreak - -$u = u - P^{-T} (A u - v)$ -\pagebreak - $F(u,\nabla u)=0$ \pagebreak @@ -145,6 +136,15 @@ $\dfrac{d f(x, y(x))}{d y}$ \pagebreak +$O(\text{dim}^3)$ +\pagebreak + +$u = u - P^{-1} (A u - v)$ +\pagebreak + +$u = u - P^{-T} (A u - v)$ +\pagebreak + $u|_{\partial\Omega}=g$ \pagebreak @@ -175,9 +175,6 @@ $\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$ \pagebreak -$hp$ -\pagebreak - $x_{12}$ \pagebreak @@ -283,6 +280,9 @@ $x=C\tilde x+k$ \pagebreak +$hp$ +\pagebreak + \[ A^K_{ij} = \int_K \nabla \varphi_i(\bf x) \cdot \nabla \varphi_j(\bf x) \; dx \] 
@@ -324,116 +324,6 @@ $J_K$ \pagebreak -$Q_2$ -\pagebreak - -$p$ -\pagebreak - -$(A+k\,B)\,C$ -\pagebreak - -$B$ -\pagebreak - -$b-Ax$ -\pagebreak - -$V_h$ -\pagebreak - -$u_h(\mathbf x)= \sum_j U_j \varphi_i(\mathbf x)$ -\pagebreak - -$U_j$ -\pagebreak - -$\|u-u_h\|_{H^1} \le Ch^p \|u\|_{H^{p+1}}$ -\pagebreak - -$(u,v)$ -\pagebreak - -\begin{align*} - \mathbf S(u,v) &= (1-v)\mathbf c_0(u)+v \mathbf c_1(u) + (1-u)\mathbf c_2(v) + u \mathbf c_3(v) \\ - &\quad - \left[(1-u)(1-v) \mathbf x_0 + u(1-v) \mathbf x_1 + (1-u)v \mathbf x_2 + uv \mathbf x_3 \right] - \end{align*} -\pagebreak - -$\bf x_0, \bf x_1, \bf x_2, \bf x_3$ -\pagebreak - -$\bf c_0, \bf c_1, \bf c_2, \bf c_3$ -\pagebreak - -$(0,1)^2$ -\pagebreak - -$(u,v) = (0.5, - 0.5)$ -\pagebreak - -$\mathbf c_0(0.5)$ -\pagebreak - -$\mathbf c_1(0.5)$ -\pagebreak - -$\mathbf c_2(0.5)$ -\pagebreak - -$\mathbf c_3(0.5)$ -\pagebreak - -$\frac{\displaystyle 1}{\displaystyle 2}$ -\pagebreak - -$-\frac{\displaystyle 1}{\displaystyle 4}$ -\pagebreak - -$-\frac{\displaystyle - 1}{\displaystyle 4}$ -\pagebreak - -$\frac{\displaystyle - 1}{\displaystyle 2}$ -\pagebreak - -$+\frac{\displaystyle - 1}{\displaystyle 2}$ -\pagebreak - -$\frac{\displaystyle - 1}{\displaystyle 8}$ -\pagebreak - -$\frac{\displaystyle 1}{\displaystyle 8}$ -\pagebreak - -$(u_i,v_i)$ -\pagebreak - -$\mathcal O(h^{k+1})$ -\pagebreak - -$k=10$ -\pagebreak - -$\mathcal O(k)$ -\pagebreak - -$\mathcal O(k^d)$ -\pagebreak - -$\mathcal O(1)$ -\pagebreak - -$(k+1)^{d-1}$ -\pagebreak - -$(k+1)^d$ -\pagebreak - \begin{eqnarray*} \left(\begin{array}{cc} M & B^T \\ B & 0 @@ -454,6 +344,9 @@ $M$ \pagebreak +$B$ +\pagebreak + $B^T$ \pagebreak @@ -482,6 +375,9 @@ $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$ \pagebreak +$U_j$ +\pagebreak + $u_h \in V_h$ \pagebreak @@ -492,6 +388,9 @@ V_h$ \pagebreak +$V_h$ +\pagebreak + $\varphi_j(\mathbf{x})$ \pagebreak /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_6_1_and_6_2.html differs 
(JavaScript source, ASCII text, with very long lines (1340)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_6_1_and_6_2.html 2023-11-25 15:25:50.656744668 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_6_1_and_6_2.html 2023-11-25 15:25:50.656744668 +0100 @@ -693,7 +693,7 @@
  • -

    Improved: The FEValuesViews objects that one gets when writing things like fe_values[velocities] (see Handling vector valued problems) have become a lot smarter. They now compute a significant amount of data at creation time, rather than on the fly. This means that creating such objects becomes more expensive but using them is cheaper. To offset this cost, FEValuesBase objects now create all possible FEValuesViews objects at creation time, rather than whenever you do things like fe_values[velocities], and simply return a reference to a pre-generated object. This turns an $O(N)$ effort into an $O(1)$ effort, where $N$ is the number of cells.
    +

    Improved: The FEValuesViews objects that one gets when writing things like fe_values[velocities] (see Handling vector valued problems) have become a lot smarter. They now compute a significant amount of data at creation time, rather than on the fly. This means that creating such objects becomes more expensive but using them is cheaper. To offset this cost, FEValuesBase objects now create all possible FEValuesViews objects at creation time, rather than whenever you do things like fe_values[velocities], and simply return a reference to a pre-generated object. This turns an $O(N)$ effort into an $O(1)$ effort, where $N$ is the number of cells.
    (WB 2008/12/10)

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_6_2_and_6_3.html differs (JavaScript source, ASCII text, with very long lines (994)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_6_2_and_6_3.html 2023-11-25 15:25:50.673410996 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_6_2_and_6_3.html 2023-11-25 15:25:50.673410996 +0100 @@ -364,7 +364,7 @@
  • -

    New: The GeometryInfo::d_linear_shape_function and GeometryInfo::d_linear_shape_function_gradient functions can be used to represent the $d$-linear shape functions that are frequently used to map the reference cell to real cells (though the Mapping class hierarchy also allows to use higher order mappings).
    +

    New: The GeometryInfo::d_linear_shape_function and GeometryInfo::d_linear_shape_function_gradient functions can be used to represent the $d$-linear shape functions that are frequently used to map the reference cell to real cells (though the Mapping class hierarchy also allows to use higher order mappings).
    (WB 2009/06/28)

    @@ -499,7 +499,7 @@
  • -

    New: There are new functions FullMatrix::cholesky and FullMatrix::outer_product. FullMatrix::cholesky finds the Cholesky decomposition of a matrix in lower triangular form. FullMatrix::outer_product calculates *this $= VW^T$ where $V$ and $W$ are vectors.
    +

    New: There are new functions FullMatrix::cholesky and FullMatrix::outer_product. FullMatrix::cholesky finds the Cholesky decomposition of a matrix in lower triangular form. FullMatrix::outer_product calculates *this $= VW^T$ where $V$ and $W$ are vectors.
    (Jean Marie Linhart 2009/07/27)

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_7_0_and_7_1.html differs (JavaScript source, ASCII text, with very long lines (1392)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_7_0_and_7_1.html 2023-11-25 15:25:50.686744058 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_7_0_and_7_1.html 2023-11-25 15:25:50.686744058 +0100 @@ -374,7 +374,7 @@

  • -

    Improved: Evaluation of Lagrangian basis functions has been made stable by exchanging polynomial evaluation from the standard form $a_n x^n+\ldots+a_1 x + a_0$ to a product of linear factors, $c (x - x_0) (x-x_1)\ldots (x-x_n)$. This ensures accurate evaluation up to very high order and avoids inaccuracies when using high order finite elements.
    +

    Improved: Evaluation of Lagrangian basis functions has been made stable by exchanging polynomial evaluation from the standard form $a_n x^n+\ldots+a_1 x + a_0$ to a product of linear factors, $c (x - x_0) (x-x_1)\ldots (x-x_n)$. This ensures accurate evaluation up to very high order and avoids inaccuracies when using high order finite elements.
    (Martin Kronbichler 2011/07/26)

  • /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_7_1_and_7_2.html differs (JavaScript source, ASCII text, with very long lines (1108)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_7_1_and_7_2.html 2023-11-25 15:25:50.703410385 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_7_1_and_7_2.html 2023-11-25 15:25:50.703410385 +0100 @@ -330,7 +330,7 @@

  • -

    Fixed: Computing the $W^{1,\infty}$ norm and seminorm in VectorTools::integrate_difference was not implemented. This is now fixed.
    +

    Fixed: Computing the $W^{1,\infty}$ norm and seminorm in VectorTools::integrate_difference was not implemented. This is now fixed.
    (Wolfgang Bangerth 2012/06/02)

  • /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_8_1_and_8_2.html differs (JavaScript source, ASCII text, with very long lines (948)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_8_1_and_8_2.html 2023-11-25 15:25:50.716743445 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_8_1_and_8_2.html 2023-11-25 15:25:50.716743445 +0100 @@ -837,7 +837,7 @@

  • -

    New: There is now a new class Functions::InterpolatedTensorProductGridData that can be used to (bi-/tri-)linearly interpolate data given on a tensor product mesh of $x$ (and $y$ and $z$) values, for example to evaluate experimentally determined coefficients, or to assess the accuracy of a solution by comparing with a solution generated by a different code and written in gridded data. There is also a new class Functions::InterpolatedUniformGridData that can perform the same task more efficiently if the data is stored on meshes that are uniform in each coordinate direction.
    +

    New: There is now a new class Functions::InterpolatedTensorProductGridData that can be used to (bi-/tri-)linearly interpolate data given on a tensor product mesh of $x$ (and $y$ and $z$) values, for example to evaluate experimentally determined coefficients, or to assess the accuracy of a solution by comparing with a solution generated by a different code and written in gridded data. There is also a new class Functions::InterpolatedUniformGridData that can perform the same task more efficiently if the data is stored on meshes that are uniform in each coordinate direction.
    (Wolfgang Bangerth, 2013/12/20)

  • /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_8_4_2_and_8_5_0.html differs (JavaScript source, ASCII text, with very long lines (1360)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_8_4_2_and_8_5_0.html 2023-11-25 15:25:50.736743038 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_8_4_2_and_8_5_0.html 2023-11-25 15:25:50.736743038 +0100 @@ -516,7 +516,7 @@

  • -

    Fixed: The FE_ABF class reported the maximal polynomial degree (via FiniteElement::degree) for elements of order $r$ as $r+1$, but this is wrong. It should be $r+2$ (see Section 5 of the original paper of Arnold, Boffi, and Falk). This is now fixed.
    +

    Fixed: The FE_ABF class reported the maximal polynomial degree (via FiniteElement::degree) for elements of order $r$ as $r+1$, but this is wrong. It should be $r+2$ (see Section 5 of the original paper of Arnold, Boffi, and Falk). This is now fixed.
    (Wolfgang Bangerth, 2017/01/13)

  • /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html differs (JavaScript source, ASCII text, with very long lines (1176)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 2023-11-25 15:25:50.760075899 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 2023-11-25 15:25:50.760075899 +0100 @@ -606,7 +606,7 @@

  • -

    Improved: GridGenerator::hyper_shell() in 3d now supports more n_cells options. While previously only 6, 12, or 96 cells were possible, the function now supports any number of the kind $6 \times 2^m$ with $m$ a non-negative integer. The new cases $m=2,3$ and $m\geq 5$ correspond to refinement in the azimuthal direction of the 6 or 12 cell case with a single mesh layer in radial direction, and are intended for shells that are thin and should be given more resolution in azimuthal direction.
    +

    Improved: GridGenerator::hyper_shell() in 3d now supports more n_cells options. While previously only 6, 12, or 96 cells were possible, the function now supports any number of the kind $6 \times 2^m$ with $m$ a non-negative integer. The new cases $m=2,3$ and $m\geq 5$ correspond to refinement in the azimuthal direction of the 6 or 12 cell case with a single mesh layer in radial direction, and are intended for shells that are thin and should be given more resolution in azimuthal direction.
    (Martin Kronbichler, 2020/04/07)

  • @@ -1560,7 +1560,7 @@

  • -

    Improved: The additional roots of the HermiteLikeInterpolation with degree $p$ greater than four have been switched to the roots of the Jacobi polynomial $P^{(4,4)}_{p-3}$, making the interior bubble functions $L_2$ orthogonal and improving the conditioning of interpolation slightly.
    +

    Improved: The additional roots of the HermiteLikeInterpolation with degree $p$ greater than four have been switched to the roots of the Jacobi polynomial $P^{(4,4)}_{p-3}$, making the interior bubble functions $L_2$ orthogonal and improving the conditioning of interpolation slightly.
    (Martin Kronbichler, 2019/07/12)

  • /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAffineConstraints.html differs (JavaScript source, ASCII text, with very long lines (1132)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAffineConstraints.html 2023-11-25 15:25:50.793408553 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAffineConstraints.html 2023-11-25 15:25:50.790075289 +0100 @@ -357,9 +357,9 @@

    The algorithms used in the implementation of this class are described in some detail in the hp-paper. There is also a significant amount of documentation on how to use this class in the Constraints on degrees of freedom module.

    Description of constraints

    Each "line" in objects of this class corresponds to one constrained degree of freedom, with the number of the line being i, entered by using add_line() or add_lines(). The entries in this line are pairs of the form (j,aij), which are added by add_entry() or add_entries(). The organization is essentially a SparsityPattern, but with only a few lines containing nonzero elements, and therefore no data wasted on the others. For each line, which has been added by the mechanism above, an elimination of the constrained degree of freedom of the form

    -\[
+<picture><source srcset=\[
  x_i = \sum_j a_{ij} x_j + b_i
-\] +\]" src="form_1577.png"/>

    is performed, where bi is optional and set by set_inhomogeneity(). Thus, if a constraint is formulated for instance as a zero mean value of several degrees of freedom, one of the degrees has to be chosen to be eliminated.

    Note that the constraints are linear in the xi, and that there might be a constant (non-homogeneous) term in the constraint. This is exactly the form we need for hanging node constraints, where we need to constrain one degree of freedom in terms of others. There are other conditions of this form possible, for example for implementing mean value conditions as is done in the step-11 tutorial program. The name of the class stems from the fact that these constraints can be represented in matrix form as X x = b, and this object then describes the matrix X and the vector b. The most frequent way to create/fill objects of this type is using the DoFTools::make_hanging_node_constraints() function. The use of these objects is first explained in step-6.

    @@ -929,13 +929,13 @@
    -

    Add an entry to a given line. In other words, this function adds a term $a_{ij} x_j$ to the constraints for the $i$th degree of freedom.

    +

    Add an entry to a given line. In other words, this function adds a term $a_{ij} x_j$ to the constraints for the $i$th degree of freedom.

    If an entry with the same indices as the one this function call denotes already exists, then this function simply returns provided that the value of the entry is the same. Thus, it does no harm to enter a constraint twice.

    Parameters
    - - - + + +
    [in]constrained_dof_indexThe index $i$ of the degree of freedom that is being constrained.
    [in]columnThe index $j$ of the degree of freedom being entered into the constraint for degree of freedom $i$.
    [in]weightThe factor $a_{ij}$ that multiplies $x_j$.
    [in]constrained_dof_indexThe index $i$ of the degree of freedom that is being constrained.
    [in]columnThe index $j$ of the degree of freedom being entered into the constraint for degree of freedom $i$.
    [in]weightThe factor $a_{ij}$ that multiplies $x_j$.
    @@ -1010,11 +1010,11 @@
    -

    Set an inhomogeneity to the constraint for a degree of freedom. In other words, it adds a constant $b_i$ to the constraint for degree of freedom $i$. For this to work, you need to call add_line() first for the given degree of freedom.

    +

    Set an inhomogeneity to the constraint for a degree of freedom. In other words, it adds a constant $b_i$ to the constraint for degree of freedom $i$. For this to work, you need to call add_line() first for the given degree of freedom.

    Parameters
    - - + +
    [in]constrained_dof_indexThe index $i$ of the degree of freedom that is being constrained.
    [in]valueThe right hand side value $b_i$ for the constraint on the degree of freedom $i$.
    [in]constrained_dof_indexThe index $i$ of the degree of freedom that is being constrained.
    [in]valueThe right hand side value $b_i$ for the constraint on the degree of freedom $i$.
    @@ -1042,9 +1042,9 @@

    Close the filling of entries. Since the lines of a matrix of this type are usually filled in an arbitrary order and since we do not want to use associative constrainers to store the lines, we need to sort the lines and within the lines the columns before usage of the matrix. This is done through this function.

    Also, zero entries are discarded, since they are not needed.

    After closing, no more entries are accepted. If the object was already closed, then this function returns immediately.

    -

    This function also resolves chains of constraints. For example, degree of freedom 13 may be constrained to $u_{13} = \frac{u_3}{2} + \frac{u_7}{2}$ while degree of freedom 7 is itself constrained as $u_{7} = \frac{u_2}{2}
-+ \frac{u_4}{2}$. Then, the resolution will be that $u_{13} =
-\frac{u_3}{2} + \frac{u_2}{4} + \frac{u_4}{4}$. Note, however, that cycles in this graph of constraints are not allowed, i.e., for example $u_4$ may not itself be constrained, directly or indirectly, to $u_{13}$ again.

    +

    This function also resolves chains of constraints. For example, degree of freedom 13 may be constrained to $u_{13} = \frac{u_3}{2} + \frac{u_7}{2}$ while degree of freedom 7 is itself constrained as $u_{7} = \frac{u_2}{2}
++ \frac{u_4}{2}$. Then, the resolution will be that $u_{13} =
+\frac{u_3}{2} + \frac{u_2}{4} + \frac{u_4}{4}$. Note, however, that cycles in this graph of constraints are not allowed, i.e., for example $u_4$ may not itself be constrained, directly or indirectly, to $u_{13}$ again.

    @@ -1487,9 +1487,9 @@

    Print the constraints represented by the current object to the given stream.

    For each constraint of the form

    -\[
+<picture><source srcset=\[
  x_{42} = 0.5 x_2 + 0.25 x_{14} + 2.75
-\] +\]" src="form_1586.png"/>

    this function will write a sequence of lines that look like this:

    42 2 : 0.5
    42 14 : 0.25
    @@ -2150,7 +2150,7 @@

    This function takes a matrix of local contributions (local_matrix) corresponding to the degrees of freedom indices given in local_dof_indices and distributes them to the global matrix. In other words, this function implements a scatter operation. In most cases, these local contributions will be the result of an integration over a cell or face of a cell. However, as long as local_matrix and local_dof_indices have the same number of elements, this function is happy with whatever it is given.

    In contrast to the similar function in the DoFAccessor class, this function also takes care of constraints, i.e. if one of the elements of local_dof_indices belongs to a constrained node, then rather than writing the corresponding element of local_matrix into global_matrix, the element is distributed to the entries in the global matrix to which this particular degree of freedom is constrained.

    -

    With this scheme, we never write into rows or columns of constrained degrees of freedom. In order to make sure that the resulting matrix can still be inverted, we need to do something with the diagonal elements corresponding to constrained nodes. Thus, if a degree of freedom in local_dof_indices is constrained, we distribute the corresponding entries in the matrix, but also add the absolute value of the diagonal entry of the local matrix to the corresponding entry in the global matrix. Assuming the discretized operator is positive definite, this guarantees that the diagonal entry is always non-zero, positive, and of the same order of magnitude as the other entries of the matrix. On the other hand, when solving a source problem $Au=f$ the exact value of the diagonal element is not important, since the value of the respective degree of freedom will be overwritten by the distribute() call later on anyway.

    +

    With this scheme, we never write into rows or columns of constrained degrees of freedom. In order to make sure that the resulting matrix can still be inverted, we need to do something with the diagonal elements corresponding to constrained nodes. Thus, if a degree of freedom in local_dof_indices is constrained, we distribute the corresponding entries in the matrix, but also add the absolute value of the diagonal entry of the local matrix to the corresponding entry in the global matrix. Assuming the discretized operator is positive definite, this guarantees that the diagonal entry is always non-zero, positive, and of the same order of magnitude as the other entries of the matrix. On the other hand, when solving a source problem $Au=f$ the exact value of the diagonal element is not important, since the value of the respective degree of freedom will be overwritten by the distribute() call later on anyway.

    Note
    The procedure described above adds an unforeseeable number of artificial eigenvalues to the spectrum of the matrix. Therefore, it is recommended to use the equivalent function with two local index vectors in such a case.

    By using this function to distribute local contributions to the global object, one saves the call to the condense function after the vectors and matrices are fully assembled.

    Note
    This function in itself is thread-safe, i.e., it works properly also when several threads call it simultaneously. However, the function call is only thread-safe if the underlying global matrix allows for simultaneous access and the access is not to rows with the same global index at the same time. This needs to be made sure from the caller's site. There is no locking mechanism inside this method to prevent data races.
    @@ -2200,7 +2200,7 @@

    This function does almost the same as the function above but can treat general rectangular matrices. The main difference to achieve this is that the diagonal entries in constrained rows are left untouched instead of being filled with arbitrary values.

    -

    Since the diagonal entries corresponding to eliminated degrees of freedom are not set, the result may have a zero eigenvalue, if applied to a square matrix. This has to be considered when solving the resulting problems. For solving a source problem $Au=f$, it is possible to set the diagonal entry after building the matrix by a piece of code of the form

    +

    Since the diagonal entries corresponding to eliminated degrees of freedom are not set, the result may have a zero eigenvalue, if applied to a square matrix. This has to be considered when solving the resulting problems. For solving a source problem $Au=f$, it is possible to set the diagonal entry after building the matrix by a piece of code of the form

    for (unsigned int i=0;i<matrix.m();++i)
    if (constraints.is_constrained(i))
    matrix.diag_element(i) = 1.;
    @@ -2541,7 +2541,7 @@
    -

    Given a vector, set all constrained degrees of freedom to values so that the constraints are satisfied. For example, if the current object stores the constraint $x_3=\frac 12 x_1 + \frac 12 x_2$, then this function will read the values of $x_1$ and $x_2$ from the given vector and set the element $x_3$ according to this constraints. Similarly, if the current object stores the constraint $x_{42}=208$, then this function will set the 42nd element of the given vector to 208.

    +

    Given a vector, set all constrained degrees of freedom to values so that the constraints are satisfied. For example, if the current object stores the constraint $x_3=\frac 12 x_1 + \frac 12 x_2$, then this function will read the values of $x_1$ and $x_2$ from the given vector and set the element $x_3$ according to this constraints. Similarly, if the current object stores the constraint $x_{42}=208$, then this function will set the 42nd element of the given vector to 208.

    Note
    If this function is called with a parallel vector vec, then the vector must not contain ghost elements.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (950)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 2023-11-25 15:25:50.813408145 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 2023-11-25 15:25:50.813408145 +0100 @@ -218,9 +218,9 @@

    For fixed theta, the Crank-Nicolson scheme is the only second order scheme. Nevertheless, further stability may be achieved by choosing theta larger than ½, thereby introducing a first order error term. In order to avoid a loss of convergence order, the adaptive theta scheme can be used, where theta=½+c dt.

    Assume that we want to solve the equation u' + F(u) = 0 with a step size k. A step of the theta scheme can be written as

    -\[
+<picture><source srcset=\[
   M u_{n+1} + \theta k F(u_{n+1})  = M u_n - (1-\theta)k F(u_n).
-\] +\]" src="form_351.png"/>

    Here, M is the mass matrix. We see, that the right hand side amounts to an explicit Euler step with modified step size in weak form (up to inversion of M). The left hand side corresponds to an implicit Euler step with modified step size (right hand side given). Thus, the implementation of the theta scheme will use two Operator objects, one for the explicit, one for the implicit part. Each of these will use its own TimestepData to account for the modified step sizes (and different times if the problem is not autonomous). Note that once the explicit part has been computed, the left hand side actually constitutes a linear or nonlinear system which has to be solved.

    Usage AnyData

    @@ -300,8 +300,8 @@
    }
    size_type n() const
    size_type m() const
    -

    Now we need to study the application of the implicit and explicit operator. We assume that the pointer matrix points to the matrix created in the main program (the constructor did this for us). Here, we first get the time step size from the AnyData object that was provided as input. Then, if we are in the first step or if the timestep has changed, we fill the local matrix $m$, such that with the given matrix $M$, it becomes

    -\[ m = I - \Delta t M. \] +

    Now we need to study the application of the implicit and explicit operator. We assume that the pointer matrix points to the matrix created in the main program (the constructor did this for us). Here, we first get the time step size from the AnyData object that was provided as input. Then, if we are in the first step or if the timestep has changed, we fill the local matrix $m$, such that with the given matrix $M$, it becomes

    +\[ m = I - \Delta t M. \]

    After we have worked off the notifications, we clear them, such that the matrix is only generated when necessary.

    void Explicit::operator()(AnyData &out, const AnyData &in)
    @@ -1174,7 +1174,7 @@

    The operator computing the explicit part of the scheme. This will receive in its input data the value at the current time with name "Current time solution". It should obtain the current time and time step size from explicit_data().

    -

    Its return value is $ Mu+cF(u) $, where $u$ is the current state vector, $M$ the mass matrix, $F$ the operator in space and $c$ is the adjusted time step size $(1-\theta) \Delta t$.

    +

    Its return value is $ Mu+cF(u) $, where $u$ is the current state vector, $M$ the mass matrix, $F$ the operator in space and $c$ is the adjusted time step size $(1-\theta) \Delta t$.

    Definition at line 416 of file theta_timestepping.h.

    @@ -1202,7 +1202,7 @@

    The operator solving the implicit part of the scheme. It will receive in its input data the vector "Previous time". Information on the timestep should be obtained from implicit_data().

    -

    Its return value is the solution u of Mu-cF(u)=f, where f is the dual space vector found in the "Previous time" entry of the input data, M the mass matrix, F the operator in space and c is the adjusted time step size $ \theta \Delta t$

    +

    Its return value is the solution u of Mu-cF(u)=f, where f is the dual space vector found in the "Previous time" entry of the input data, M the mass matrix, F the operator in space and c is the adjusted time step size $ \theta \Delta t$

    Definition at line 428 of file theta_timestepping.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAnisotropicPolynomials.html differs (JavaScript source, ASCII text, with very long lines (1527)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAnisotropicPolynomials.html 2023-11-25 15:25:50.833407738 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAnisotropicPolynomials.html 2023-11-25 15:25:50.833407738 +0100 @@ -153,10 +153,10 @@

    Detailed Description

    template<int dim>
    class AnisotropicPolynomials< dim >

    Anisotropic tensor product of given polynomials.

    -

    Given one-dimensional polynomials $P^x_1(x), P^x_2(x), \ldots$ in $x$-direction, $P^y_1(y), P^y_2(y), \ldots$ in $y$-direction, and so on, this class generates polynomials of the form $Q_{ijk}(x,y,z)
-= P^x_i(x)P^y_j(y)P^z_k(z)$. (With obvious generalization if dim is in fact only 2. If dim is in fact only 1, then the result is simply the same set of one-dimensional polynomials passed to the constructor.)

    -

    If the elements of each set of base polynomials are mutually orthogonal on the interval $[-1,1]$ or $[0,1]$, then the tensor product polynomials are orthogonal on $[-1,1]^d$ or $[0,1]^d$, respectively.

    -

    The resulting dim-dimensional tensor product polynomials are ordered as follows: We iterate over the $x$ coordinates running fastest, then the $y$ coordinate, etc. For example, for dim==2, the first few polynomials are thus $P^x_1(x)P^y_1(y)$, $P^x_2(x)P^y_1(y)$, $P^x_3(x)P^y_1(y)$, ..., $P^x_1(x)P^y_2(y)$, $P^x_2(x)P^y_2(y)$, $P^x_3(x)P^y_2(y)$, etc.

    +

    Given one-dimensional polynomials $P^x_1(x), P^x_2(x), \ldots$ in $x$-direction, $P^y_1(y), P^y_2(y), \ldots$ in $y$-direction, and so on, this class generates polynomials of the form $Q_{ijk}(x,y,z)
+= P^x_i(x)P^y_j(y)P^z_k(z)$. (With obvious generalization if dim is in fact only 2. If dim is in fact only 1, then the result is simply the same set of one-dimensional polynomials passed to the constructor.)

    +

    If the elements of each set of base polynomials are mutually orthogonal on the interval $[-1,1]$ or $[0,1]$, then the tensor product polynomials are orthogonal on $[-1,1]^d$ or $[0,1]^d$, respectively.

    +

    The resulting dim-dimensional tensor product polynomials are ordered as follows: We iterate over the $x$ coordinates running fastest, then the $y$ coordinate, etc. For example, for dim==2, the first few polynomials are thus $P^x_1(x)P^y_1(y)$, $P^x_2(x)P^y_1(y)$, $P^x_3(x)P^y_1(y)$, ..., $P^x_1(x)P^y_2(y)$, $P^x_2(x)P^y_2(y)$, $P^x_3(x)P^y_2(y)$, etc.

    Definition at line 322 of file tensor_product_polynomials.h.

    Constructor & Destructor Documentation

    @@ -676,7 +676,7 @@
    -

    Each tensor product polynomial $p_i$ is a product of one-dimensional polynomials in each space direction. Compute the indices of these one- dimensional polynomials for each space direction, given the index i.

    +

    Each tensor product polynomial $p_i$ is a product of one-dimensional polynomials in each space direction. Compute the indices of these one- dimensional polynomials for each space direction, given the index i.

    Definition at line 538 of file tensor_product_polynomials.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classArpackSolver.html differs (JavaScript source, ASCII text, with very long lines (1026)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classArpackSolver.html 2023-11-25 15:25:50.850074067 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classArpackSolver.html 2023-11-25 15:25:50.850074067 +0100 @@ -229,14 +229,14 @@

    Detailed Description

    Interface for using ARPACK. ARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. Here we interface to the routines dnaupd and dneupd of ARPACK. If the operator is specified to be symmetric we use the symmetric interface dsaupd and dseupd of ARPACK instead. The package is designed to compute a few eigenvalues and corresponding eigenvectors of a general n by n matrix A. It is most appropriate for large sparse matrices A.

    -

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    +

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    The ArpackSolver can be used in application codes with serial objects in the following way:

    solver.solve(A, B, OP, lambda, x, size_of_spectrum);
    SolverControl & solver_control
    -

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells ARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector that will contain the eigenvectors computed, and OP is an inverse operation for the matrix A. Shift and invert transformation around zero is applied.

    +

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells ARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector that will contain the eigenvectors computed, and OP is an inverse operation for the matrix A. Shift and invert transformation around zero is applied.

    Through the AdditionalData the user can specify some of the parameters to be set.

    For further information on how the ARPACK routines dsaupd, dseupd, dnaupd and dneupd work and also how to set the parameters appropriately please take a look into the ARPACK manual.

    Note
    Whenever you eliminate degrees of freedom using AffineConstraints, you generate spurious eigenvalues and eigenvectors. If you make sure that the diagonals of eliminated matrix rows are all equal to one, you get a single additional eigenvalue. But beware that some functions in deal.II set these diagonals to rather arbitrary (from the point of view of eigenvalue problems) values. See also step-36 for an example.
    @@ -525,7 +525,7 @@
    -

    Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling the dsaupd and dseupd or dnaupd and dneupd functions of ARPACK.

    +

    Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling the dsaupd and dseupd or dnaupd and dneupd functions of ARPACK.

    The function returns a vector of eigenvalues of length n and a vector of eigenvectors of length n in the symmetric case and of length n+1 in the non-symmetric case. In the symmetric case all eigenvectors are real. In the non-symmetric case complex eigenvalues always occur as complex conjugate pairs. Therefore the eigenvector for an eigenvalue with nonzero complex part is stored by putting the real and the imaginary parts in consecutive real-valued vectors. The eigenvector of the complex conjugate eigenvalue does not need to be stored, since it is just the complex conjugate of the stored eigenvector. Thus, if the last n-th eigenvalue has a nonzero imaginary part, Arpack needs in total n+1 real-valued vectors to store real and imaginary parts of the eigenvectors.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classArrayView.html differs (JavaScript source, ASCII text, with very long lines (911)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classArrayView.html 2023-11-25 15:25:50.873406924 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classArrayView.html 2023-11-25 15:25:50.873406924 +0100 @@ -1025,7 +1025,7 @@
    -

    Return a reference to the $i$th element of the range represented by the current object.

    +

    Return a reference to the $i$th element of the range represented by the current object.

    This function is marked as const because it does not change the view object. It may however return a reference to a non-const memory location depending on whether the template type of the class is const or not.

    This function is only allowed to be called if the underlying data is indeed stored in CPU memory.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAutoDerivativeFunction.html differs (JavaScript source, ASCII text, with very long lines (2139)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAutoDerivativeFunction.html 2023-11-25 15:25:50.896739784 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classAutoDerivativeFunction.html 2023-11-25 15:25:50.896739784 +0100 @@ -346,27 +346,27 @@

    Names of difference formulas.

    Enumerator
    Euler 

    The symmetric Euler formula of second order:

    -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t+h) -
 u(t-h)}{2h}.
-\] +\]" src="form_359.png"/>

    UpwindEuler 

    The upwind Euler formula of first order:

    -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t) -
 u(t-h)}{h}.
-\] +\]" src="form_360.png"/>

    FourthOrder 

    The fourth order scheme

    -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t-2h) - 8u(t-h)
 +  8u(t+h) - u(t+2h)}{12h}.
-\] +\]" src="form_361.png"/>

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBarycentricPolynomial.html differs (JavaScript source, ASCII text, with very long lines (564)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBarycentricPolynomial.html 2023-11-25 15:25:50.913406112 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBarycentricPolynomial.html 2023-11-25 15:25:50.913406112 +0100 @@ -151,7 +151,7 @@ (x, y) = c_0 (x_0, y_0) + c_1 (x_1, y_1) + c_2 (x_2, y_2). \]" src="form_626.png"/>

    -

    where each value $c_i$ is the relative weight of each vertex (so the centroid is, in 2d, where each $c_i = 1/3$). Since we only consider convex combinations we can rewrite this equation as

    +

    where each value $c_i$ is the relative weight of each vertex (so the centroid is, in 2d, where each $c_i = 1/3$). Since we only consider convex combinations we can rewrite this equation as

    \[
   (x, y) = (1 - c_1 - c_2) (x_0, y_0) + c_1 (x_1, y_1) + c_2 (x_2, y_2).
/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBaseQR.html differs (JavaScript source, ASCII text, with very long lines (1101))
--- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBaseQR.html	2023-11-25 15:25:50.930072437 +0100
+++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBaseQR.html	2023-11-25 15:25:50.930072437 +0100
@@ -155,8 +155,8 @@
 <a name=

    Detailed Description

    template<typename VectorType>
    class BaseQR< VectorType >

    A base class for thin QR implementations.

    -

    This class and classes derived from it are meant to build $Q$ and $R$ matrices one row/column at a time, i.e., by growing $R$ matrix from an empty $0\times 0$ matrix to $N\times N$, where $N$ is the number of added column vectors.

    -

    As a consequence, matrices which have the same number of rows as each vector (i.e. $Q$ matrix) is stored as a collection of vectors of VectorType.

    +

    This class and classes derived from it are meant to build $Q$ and $R$ matrices one row/column at a time, i.e., by growing $R$ matrix from an empty $0\times 0$ matrix to $N\times N$, where $N$ is the number of added column vectors.

    +

    As a consequence, matrices which have the same number of rows as each vector (i.e. $Q$ matrix) is stored as a collection of vectors of VectorType.

    Definition at line 44 of file qr.h.

    Member Typedef Documentation

    @@ -377,7 +377,7 @@
    -

    Solve $Rx=y$. Vectors x and y should be consistent with the current size of the subspace. If transpose is true, $R^Tx=y$ is solved instead.

    +

    Solve $Rx=y$. Vectors x and y should be consistent with the current size of the subspace. If transpose is true, $R^Tx=y$ is solved instead.

    @@ -415,7 +415,7 @@
    -

    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    +

    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implemented in QR< VectorType >, and ImplicitQR< VectorType >.

    @@ -456,7 +456,7 @@
    -

    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    +

    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implemented in QR< VectorType >, and ImplicitQR< VectorType >.

    @@ -496,7 +496,7 @@
    -

    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    +

    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implemented in QR< VectorType >, and ImplicitQR< VectorType >.

    @@ -537,7 +537,7 @@
    -

    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    +

    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implemented in QR< VectorType >, and ImplicitQR< VectorType >.

    @@ -599,7 +599,7 @@
    -

    Compute $y=Hx$ where $H$ is the matrix formed by the column vectors stored by this object.

    +

    Compute $y=Hx$ where $H$ is the matrix formed by the column vectors stored by this object.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockIndices.html differs (JavaScript source, ASCII text, with very long lines (848)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockIndices.html 2023-11-25 15:25:50.946738767 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockIndices.html 2023-11-25 15:25:50.946738767 +0100 @@ -209,7 +209,7 @@ void swap (BlockIndices &u, BlockIndices &v) &#href_anchor"details" id="details">

    Detailed Description

    -

    BlockIndices represents a range of indices (such as the range $[0,N)$ of valid indices for elements of a vector) and how this one range is broken down into smaller but contiguous "blocks" (such as the velocity and pressure parts of a solution vector). In particular, it provides the ability to translate between global indices and the indices within a block. This class is used, for example, in the BlockVector, BlockSparsityPattern, and BlockMatrixBase classes.

    +

    BlockIndices represents a range of indices (such as the range $[0,N)$ of valid indices for elements of a vector) and how this one range is broken down into smaller but contiguous "blocks" (such as the velocity and pressure parts of a solution vector). In particular, it provides the ability to translate between global indices and the indices within a block. This class is used, for example, in the BlockVector, BlockSparsityPattern, and BlockMatrixBase classes.

    The information that can be obtained from this class falls into two groups. First, it is possible to query the global size of the index space (through the total_size() member function), and the number of blocks and their sizes (via size() and the block_size() functions).

    Secondly, this class manages the conversion of global indices to the local indices within this block, and the other way around. This is required, for example, when you address a global element in a block vector and want to know within which block this is, and which index within this block it corresponds to. It is also useful if a matrix is composed of several blocks, where you have to translate global row and column indices to local ones.

    See also
    Block (linear algebra)
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockLinearOperator.html differs (JavaScript source, ASCII text, with very long lines (1136)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockLinearOperator.html 2023-11-25 15:25:50.973404891 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockLinearOperator.html 2023-11-25 15:25:50.973404891 +0100 @@ -274,7 +274,7 @@
    LinearOperator< Range, Domain, BlockPayload::BlockType > linear_operator(const Matrix &matrix)
    BlockLinearOperator< Range, Domain, BlockPayload > block_diagonal_operator(const BlockMatrixType &block_matrix)

    A BlockLinearOperator can be sliced to a LinearOperator at any time. This removes all information about the underlying block structure (because above std::function objects are no longer available) - the linear operator interface, however, remains intact.

    -
    Note
    This class makes heavy use of std::function objects and lambda functions. This flexibility comes with a run-time penalty. Only use this object to encapsulate object with medium to large individual block sizes, and small block structure (as a rule of thumb, matrix blocks greater than $1000\times1000$).
    +
    Note
    This class makes heavy use of std::function objects and lambda functions. This flexibility comes with a run-time penalty. Only use this object to encapsulate object with medium to large individual block sizes, and small block structure (as a rule of thumb, matrix blocks greater than $1000\times1000$).

    Definition at line 166 of file block_linear_operator.h.

    Member Typedef Documentation

    @@ -812,11 +812,11 @@
    LinearOperator< Range, Domain, BlockPayload::BlockType > distribute_constraints_linear_operator(const AffineConstraints< typename Range::value_type > &constraints, const LinearOperator< Range, Domain, BlockPayload::BlockType > &exemplar)

    and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

    -\[
+<picture><source srcset=\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
-\] +\]" src="form_1616.png"/>

    -

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    +

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom module.

    Note
    Currently, this function may not work correctly for distributed data structures.
    @@ -865,11 +865,11 @@

    with

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

    -\[
+<picture><source srcset=\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
-\] +\]" src="form_1616.png"/>

    -

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    +

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom module.

    Note
    Currently, this function may not work correctly for distributed data structures.
    @@ -908,8 +908,8 @@
    -

    Addition of two linear operators first_op and second_op given by $(\mathrm{first\_op}+\mathrm{second\_op})x \dealcoloneq \mathrm{first\_op}(x)
-+ \mathrm{second\_op}(x)$

    +

    Addition of two linear operators first_op and second_op given by $(\mathrm{first\_op}+\mathrm{second\_op})x \dealcoloneq \mathrm{first\_op}(x)
++ \mathrm{second\_op}(x)$

    Definition at line 390 of file linear_operator.h.

    @@ -946,8 +946,8 @@
    -

    Subtraction of two linear operators first_op and second_op given by $(\mathrm{first\_op}-\mathrm{second\_op})x \dealcoloneq \mathrm{first\_op}(x)
-- \mathrm{second\_op}(x)$

    +

    Subtraction of two linear operators first_op and second_op given by $(\mathrm{first\_op}-\mathrm{second\_op})x \dealcoloneq \mathrm{first\_op}(x)
+- \mathrm{second\_op}(x)$

    Definition at line 449 of file linear_operator.h.

    @@ -1066,8 +1066,8 @@
    -

    Composition of two linear operators first_op and second_op given by $(\mathrm{first\_op}*\mathrm{second\_op})x \dealcoloneq
-\mathrm{first\_op}(\mathrm{second\_op}(x))$

    +

    Composition of two linear operators first_op and second_op given by $(\mathrm{first\_op}*\mathrm{second\_op})x \dealcoloneq
+\mathrm{first\_op}(\mathrm{second\_op}(x))$

    Definition at line 587 of file linear_operator.h.

    @@ -1701,7 +1701,7 @@

    Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that are likely necessary to be used in order to perform any useful tasks in linear algebra with this operator.

    We construct the definition of the Schur complement in the following way:

    Consider a general system of linear equations that can be decomposed into two major sets of equations:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \mathbf{K}\mathbf{d} = \mathbf{f}
 \quad \Rightarrow\quad
 \left(\begin{array}{cc}
@@ -1714,60 +1714,60 @@
 \left(\begin{array}{cc}
    f \\ g
 \end{array}\right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_1852.png"/>

    -

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

    +

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

    This is equivalent to the following two statements:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (1) \quad Ax + By &=& f \\
   (2) \quad Cx + Dy &=& g \quad .
-\end{eqnarray*} +\end{eqnarray*}" src="form_1857.png"/>

    -

    Assuming that $ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

    -\begin{eqnarray*}
+<p>Assuming that <picture><source srcset=$ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

    +\begin{eqnarray*}
   (3) \quad x &=& A^{-1}(f - By) \quad \text{from} \quad (1) \\
   (4) \quad y &=& D^{-1}(g - Cx) \quad \text{from} \quad (2) ,
-\end{eqnarray*} +\end{eqnarray*}" src="form_1859.png"/>

    which amount to performing block Gaussian elimination on this system of equations.

    For the purpose of the current implementation, we choose to substitute (3) into (2)

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   C \: A^{-1}(f - By) + Dy &=& g \\
   -C \: A^{-1} \: By + Dy &=& g - C \: A^{-1} \: f \quad .
-\end{eqnarray*} +\end{eqnarray*}" src="form_1860.png"/>

    This leads to the result

    -\[
+<picture><source srcset=\[
   (5) \quad (D - C\: A^{-1} \:B)y  = g - C \: A^{-1} f
       \quad \Rightarrow \quad Sy = g'
-\] +\]" src="form_1861.png"/>

    -

    with $ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

    -

    So for any arbitrary vector $ a $, the Schur complement performs the following operation:

    -\[
+<p> with <picture><source srcset=$ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

    +

    So for any arbitrary vector $ a $, the Schur complement performs the following operation:

    +\[
   (6) \quad Sa = (D - C \: A^{-1} \: B)a
-\] +\]" src="form_1868.png"/>

    A typical set of steps needed the solve a linear system (1),(2) would be:

    1. Define the inverse matrix A_inv (using inverse_operator()).
    2. -
    3. Define the Schur complement $ S $ (using schur_complement()).
    4. -
    5. Define iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that the both $ S $ and its preconditioner operate over the same space as $ D $.
    6. +
    7. Define the Schur complement $ S $ (using schur_complement()).
    8. +
    9. Define iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that the both $ S $ and its preconditioner operate over the same space as $ D $.
    10. Perform pre-processing step on the RHS of (5) using condense_schur_rhs():

      -\[
+<picture><source srcset=\[
      g' = g - C \: A^{-1} \: f
-   \] + \]" src="form_1870.png"/>

    11. -
    12. Solve for $ y $ in (5):

      -\[
+<li>Solve for <picture><source srcset=$ y $ in (5):

      +\[
      y =  S^{-1} g'
-   \] + \]" src="form_1872.png"/>

    13. Perform the post-processing step from (3) using postprocess_schur_solution():

      -\[
+<picture><source srcset=\[
      x =  A^{-1} (f - By)
-   \] + \]" src="form_1873.png"/>

    @@ -1813,10 +1813,10 @@
    LinearOperator< Domain, Range, BlockPayload::BlockType > inverse_operator(const LinearOperator< Range, Domain, BlockPayload::BlockType > &op, Solver &solver, const Preconditioner &preconditioner)
    PackagedOperation< Domain_1 > postprocess_schur_solution(const LinearOperator< Range_1, Domain_1, Payload > &A_inv, const LinearOperator< Range_1, Domain_2, Payload > &B, const Domain_2 &y, const Range_1 &f)
    -

    In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

    -

    A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1}
-$ is derived from the mass matrix over this space.

    -

    From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

    // Construction of approximate inverse of Schur complement
    +

    In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

    +

    A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1}
+$ is derived from the mass matrix over this space.

    +

    From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

    // Construction of approximate inverse of Schur complement
    const auto A_inv_approx = linear_operator(preconditioner_A);
    const auto S_approx = schur_complement(A_inv_approx,B,C,D);
    @@ -1839,8 +1839,8 @@
    // Solve for y
    y = S_inv * rhs;
    x = postprocess_schur_solution (A_inv,B,y,f);
    -

    Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of a IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $
-\text{prec}(D) $, should ideally be computationally inexpensive.

    +

    Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of a IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $
+\text{prec}(D) $, should ideally be computationally inexpensive.

    However, if an iterative solver based on IterationNumberControl is used as a preconditioner then the preconditioning operation is not a linear operation. Here a flexible solver like SolverFGMRES (flexible GMRES) is best employed as an outer solver in order to deal with the variable behavior of the preconditioner. Otherwise the iterative solver can stagnate somewhere near the tolerance of the preconditioner or generally behave erratically. Alternatively, using a ReductionControl would ensure that the preconditioner always solves to the same tolerance, thereby rendering its behavior constant.

    Further examples of this functionality can be found in the test-suite, such as tests/lac/schur_complement_01.cc . The solution of a multi- component problem (namely step-22) using the schur_complement can be found in tests/lac/schur_complement_03.cc .

    See also
    Block (linear algebra)
    @@ -1863,7 +1863,7 @@
    -

    Return the number of blocks in a column (i.e, the number of "block rows", or the number $m$, if interpreted as a $m\times n$ block system).

    +

    Return the number of blocks in a column (i.e, the number of "block rows", or the number $m$, if interpreted as a $m\times n$ block system).

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockMatrixBase.html differs (JavaScript source, ASCII text, with very long lines (741)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockMatrixBase.html 2023-11-25 15:25:51.000071013 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockMatrixBase.html 2023-11-25 15:25:51.000071013 +0100 @@ -1440,7 +1440,7 @@
    -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -1547,7 +1547,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -1938,7 +1938,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -2102,7 +2102,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockSparseMatrix.html differs (JavaScript source, ASCII text, with very long lines (782)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockSparseMatrix.html 2023-11-25 15:25:51.030070405 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockSparseMatrix.html 2023-11-25 15:25:51.030070405 +0100 @@ -950,7 +950,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 396 of file block_sparse_matrix.h.

    @@ -1114,7 +1114,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 440 of file block_sparse_matrix.h.

    @@ -2328,7 +2328,7 @@
    -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -2453,7 +2453,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -2948,7 +2948,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -3096,7 +3096,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockSparseMatrixEZ.html differs (JavaScript source, ASCII text, with very long lines (671)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockSparseMatrixEZ.html 2023-11-25 15:25:51.050069998 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockSparseMatrixEZ.html 2023-11-25 15:25:51.050069998 +0100 @@ -799,7 +799,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 371 of file block_sparse_matrix_ez.h.

    @@ -832,7 +832,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 409 of file block_sparse_matrix_ez.h.

    @@ -865,7 +865,7 @@
    -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    Definition at line 391 of file block_sparse_matrix_ez.h.

    @@ -898,7 +898,7 @@
    -

    Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

    +

    Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

    Definition at line 429 of file block_sparse_matrix_ez.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockVector.html differs (JavaScript source, ASCII text, with very long lines (894)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockVector.html 2023-11-25 15:25:51.076736119 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockVector.html 2023-11-25 15:25:51.076736119 +0100 @@ -1848,7 +1848,7 @@
    -

    Return the square of the $l_2$-norm.

    +

    Return the square of the $l_2$-norm.

    @@ -1900,7 +1900,7 @@
    -

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    +

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1926,7 +1926,7 @@
    -

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    +

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1952,7 +1952,7 @@
    -

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    +

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockVectorBase.html differs (JavaScript source, ASCII text, with very long lines (910)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockVectorBase.html 2023-11-25 15:25:51.103402245 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBlockVectorBase.html 2023-11-25 15:25:51.103402245 +0100 @@ -1233,7 +1233,7 @@
    -

    Return the square of the $l_2$-norm.

    +

    Return the square of the $l_2$-norm.

    @@ -1273,7 +1273,7 @@
    -

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    +

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1293,7 +1293,7 @@
    -

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    +

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1313,7 +1313,7 @@
    -

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    +

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBoundingBox.html differs (JavaScript source, ASCII text, with very long lines (1101)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBoundingBox.html 2023-11-25 15:25:51.120068573 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classBoundingBox.html 2023-11-25 15:25:51.120068573 +0100 @@ -165,11 +165,11 @@ &#href_anchor"details" id="details">

    Detailed Description

    template<int spacedim, typename Number = double>
    class BoundingBox< spacedim, Number >

    A class that represents a box of arbitrary dimension spacedim and with sides parallel to the coordinate axes, that is, a region

    -\[
+<picture><source srcset=\[
 [x_0^L, x_0^U] \times ... \times [x_{spacedim-1}^L, x_{spacedim-1}^U],
-\] +\]" src="form_362.png"/>

    -

    where $(x_0^L , ..., x_{spacedim-1}^L)$ and $(x_0^U , ..., x_{spacedim-1}^U)$ denote the two vertices (bottom left and top right) which are used to represent the box. The quantities $x_k^L$ and $x_k^U$ denote the "lower" and "upper" bounds of values that are within the box for each coordinate direction $k$.

    +

    where $(x_0^L , ..., x_{spacedim-1}^L)$ and $(x_0^U , ..., x_{spacedim-1}^U)$ denote the two vertices (bottom left and top right) which are used to represent the box. The quantities $x_k^L$ and $x_k^U$ denote the "lower" and "upper" bounds of values that are within the box for each coordinate direction $k$.

    Geometrically, a bounding box is thus:

    Bounding boxes are, for example, useful in parallel distributed meshes to give a general description of the owners of each portion of the mesh. More generally, bounding boxes are often used to roughly describe a region of space in which an object is contained; if a candidate point is not within the bounding box (a test that is cheap to execute), then it is not necessary to perform an expensive test whether the candidate point is in fact inside the object itself. Bounding boxes are therefore often used as a first, cheap rejection test before more detailed checks. As such, bounding boxes serve many of the same purposes as the convex hull, for which it is also relatively straightforward to compute whether a point is inside or outside, though not quite as cheap as for the bounding box.

    -

    Taking the cross section of a BoundingBox<spacedim> orthogonal to a given direction gives a box in one dimension lower: BoundingBox<spacedim - 1>. In 3d, the 2 coordinates of the cross section of BoundingBox<3> can be ordered in 2 different ways. That is, if we take the cross section orthogonal to the y direction we could either order a 3d-coordinate into a 2d-coordinate as $(x,z)$ or as $(z,x)$. This class uses the second convention, corresponding to the coordinates being ordered cyclicly $x \rightarrow y \rightarrow z \rightarrow x \rightarrow ... $ To be precise, if we take a cross section:

    +

    Taking the cross section of a BoundingBox<spacedim> orthogonal to a given direction gives a box in one dimension lower: BoundingBox<spacedim - 1>. In 3d, the 2 coordinates of the cross section of BoundingBox<3> can be ordered in 2 different ways. That is, if we take the cross section orthogonal to the y direction we could either order a 3d-coordinate into a 2d-coordinate as $(x,z)$ or as $(z,x)$. This class uses the second convention, corresponding to the coordinates being ordered cyclicly $x \rightarrow y \rightarrow z \rightarrow x \rightarrow ... $ To be precise, if we take a cross section:

    @@ -751,7 +751,7 @@
    Orthogonal to Cross section coordinates ordered as
    -

    Returns the indexth vertex of the box. Vertex is meant in the same way as for a cell, so that index $\in [0, 2^{\text{dim}} - 1]$.

    +

    Returns the indexth vertex of the box. Vertex is meant in the same way as for a cell, so that index $\in [0, 2^{\text{dim}} - 1]$.

    Definition at line 233 of file bounding_box.cc.

    @@ -819,7 +819,7 @@

    Apply the affine transformation that transforms this BoundingBox to a unit BoundingBox object.

    -

    If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $G(B) = \hat{B}$ and apply it to point.

    +

    If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $G(B) = \hat{B}$ and apply it to point.

    Definition at line 312 of file bounding_box.cc.

    @@ -842,7 +842,7 @@

    Apply the affine transformation that transforms the unit BoundingBox object to this object.

    -

    If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $F(\hat{B}) = B$ and apply it to point.

    +

    If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $F(\hat{B}) = B$ and apply it to point.

    Definition at line 327 of file bounding_box.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html differs (JavaScript source, ASCII text, with very long lines (638)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html 2023-11-25 15:25:51.136734901 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html 2023-11-25 15:25:51.136734901 +0100 @@ -491,7 +491,7 @@
    -

    cuSPARSE description of the lower triangular matrix $L$.

    +

    cuSPARSE description of the lower triangular matrix $L$.

    Definition at line 176 of file cuda_precondition.h.

    @@ -545,7 +545,7 @@
    -

    Solve and analysis structure for the lower triangular matrix $L$.

    +

    Solve and analysis structure for the lower triangular matrix $L$.

    Definition at line 186 of file cuda_precondition.h.

    @@ -761,7 +761,7 @@
    -

    Determine if level information should be generated for the lower triangular matrix $L$. This value can be modified through an AdditionalData object.

    +

    Determine if level information should be generated for the lower triangular matrix $L$. This value can be modified through an AdditionalData object.

    Definition at line 233 of file cuda_precondition.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html differs (JavaScript source, ASCII text, with very long lines (639)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html 2023-11-25 15:25:51.153401226 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html 2023-11-25 15:25:51.153401226 +0100 @@ -493,7 +493,7 @@
    -

    cuSPARSE description of the lower triangular matrix $L$.

    +

    cuSPARSE description of the lower triangular matrix $L$.

    Definition at line 388 of file cuda_precondition.h.

    @@ -574,7 +574,7 @@
    -

    Solve and analysis structure for the lower triangular matrix $L$.

    +

    Solve and analysis structure for the lower triangular matrix $L$.

    Definition at line 403 of file cuda_precondition.h.

    @@ -790,7 +790,7 @@
    -

    Determine if level information should be generated for the lower triangular matrix $L$. This value can be modified through an AdditionalData object.

    +

    Determine if level information should be generated for the lower triangular matrix $L$. This value can be modified through an AdditionalData object.

    Definition at line 450 of file cuda_precondition.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html differs (JavaScript source, ASCII text, with very long lines (779)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 2023-11-25 15:25:51.173400819 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 2023-11-25 15:25:51.173400819 +0100 @@ -802,7 +802,7 @@
    -

    Matrix-vector multiplication: let $dst = M \cdot src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M \cdot src$ with $M$ being this matrix.

    Definition at line 512 of file cuda_sparse_matrix.cc.

    @@ -833,7 +833,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T \cdot src$ with $M$ being this matrix. This function does the same as vmult() but takes this transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T \cdot src$ with $M$ being this matrix. This function does the same as vmult() but takes this transposed matrix.

    Definition at line 530 of file cuda_sparse_matrix.cc.

    @@ -864,7 +864,7 @@
    -

    Adding matrix-vector multiplication. Add $M \cdot src$ on $dst$ with $M$ being this matrix.

    +

    Adding matrix-vector multiplication. Add $M \cdot src$ on $dst$ with $M$ being this matrix.

    Definition at line 548 of file cuda_sparse_matrix.cc.

    @@ -895,7 +895,7 @@
    -

    Adding matrix-vector multiplication. Add $M^T \cdot src$ to $dst$ with $M$ being this matrix. This function foes the same as vmult_add() but takes the transposed matrix.

    +

    Adding matrix-vector multiplication. Add $M^T \cdot src$ to $dst$ with $M$ being this matrix. This function foes the same as vmult_add() but takes the transposed matrix.

    Definition at line 566 of file cuda_sparse_matrix.cc.

    @@ -917,7 +917,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g., in the finite context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g., in the finite context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    Definition at line 584 of file cuda_sparse_matrix.cc.

    @@ -949,7 +949,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    Definition at line 597 of file cuda_sparse_matrix.cc.

    @@ -985,8 +985,8 @@
    -

    Compute the residual of an equation $M \cdot x=b$, where the residual is defined to be $r=b-M \cdot x$. Write the residual into $dst$. The $l_2$ norm of the residual vector is returned.

    -

    Source $x$ and destination $dst$ must not be the same vector.

    +

    Compute the residual of an equation $M \cdot x=b$, where the residual is defined to be $r=b-M \cdot x$. Write the residual into $dst$. The $l_2$ norm of the residual vector is returned.

    +

    Source $x$ and destination $dst$ must not be the same vector.

    Definition at line 611 of file cuda_sparse_matrix.cc.

    @@ -1005,8 +1005,8 @@
    -

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e., $|Mv|_1\leq |M|_1 |v|_1$.

    +

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e., $|Mv|_1\leq |M|_1 |v|_1$.

    Definition at line 626 of file cuda_sparse_matrix.cc.

    @@ -1025,8 +1025,8 @@
    -

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural norm that is compatible to the $l_\infty$-norm of vectors, i.e., $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural norm that is compatible to the $l_\infty$-norm of vectors, i.e., $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$.

    Definition at line 645 of file cuda_sparse_matrix.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCellAccessor.html differs (JavaScript source, ASCII text, with very long lines (755)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCellAccessor.html 2023-11-25 15:25:51.216733272 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCellAccessor.html 2023-11-25 15:25:51.216733272 +0100 @@ -4164,8 +4164,8 @@
    -

    This function computes a fast approximate transformation from the real to the unit cell by inversion of an affine approximation of the $d$-linear function from the reference $d$-dimensional cell.

    -

    The affine approximation of the unit to real cell mapping is found by a least squares fit of an affine function to the $2^d$ vertices of the present object. For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping. Thus, this function will return a finite result for all given input points, even in cases where the actual transformation by an actual bi-/trilinear or higher order mapping might be singular. Besides only approximating the mapping from the vertex points, this function also ignores the attached manifold descriptions. The result is only exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

    +

    This function computes a fast approximate transformation from the real to the unit cell by inversion of an affine approximation of the $d$-linear function from the reference $d$-dimensional cell.

    +

    The affine approximation of the unit to real cell mapping is found by a least squares fit of an affine function to the $2^d$ vertices of the present object. For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping. Thus, this function will return a finite result for all given input points, even in cases where the actual transformation by an actual bi-/trilinear or higher order mapping might be singular. Besides only approximating the mapping from the vertex points, this function also ignores the attached manifold descriptions. The result is only exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

    For exact transformations to the unit cell, use Mapping::transform_real_to_unit_cell().

    Note
    If dim<spacedim we first project p onto the plane.
    @@ -4206,7 +4206,7 @@
    -

    Center of the object. The center of an object is defined to be the average of the locations of the vertices, which is also where a $Q_1$ mapping would map the center of the reference cell. However, you can also ask this function to instead return the average of the vertices as computed by the underlying Manifold object associated with the current object, by setting to true the optional parameter respect_manifold. Manifolds would then typically pull back the coordinates of the vertices to a reference domain (not necessarily the reference cell), compute the average there, and then push forward the coordinates of the averaged point to the physical space again; the resulting point is guaranteed to lie within the manifold, even if the manifold is curved.

    +

    Center of the object. The center of an object is defined to be the average of the locations of the vertices, which is also where a $Q_1$ mapping would map the center of the reference cell. However, you can also ask this function to instead return the average of the vertices as computed by the underlying Manifold object associated with the current object, by setting to true the optional parameter respect_manifold. Manifolds would then typically pull back the coordinates of the vertices to a reference domain (not necessarily the reference cell), compute the average there, and then push forward the coordinates of the averaged point to the physical space again; the resulting point is guaranteed to lie within the manifold, even if the manifold is curved.

    When the object uses a different manifold description as its surrounding, like when part of the bounding objects of this TriaAccessor use a non-flat manifold description but the object itself is flat, the result given by the TriaAccessor::center() function may not be accurate enough, even when parameter respect_manifold is set to true. If you find this to be case, than you can further refine the computation of the center by setting to true the second additional parameter interpolate_from_surrounding. This computes the location of the center by a so-called transfinite interpolation from the center of all the bounding objects. For a 2d object, it puts a weight of 1/2 on each of the four surrounding lines and a weight -1/4 on the four vertices. This corresponds to a linear interpolation between the descriptions of the four faces, subtracting the contribution of the vertices that is added twice when coming through both lines adjacent to the vertex. In 3d, the weights for faces are 1/2, the weights for lines are -1/4, and the weights for vertices are 1/8. For further information, also confer to the TransfiniteInterpolationManifold class that is able to not only apply this beneficial description to a single cell but all children of a coarse cell.

    Definition at line 1787 of file tria_accessor.cc.

    @@ -4234,7 +4234,7 @@
    -

    Return the barycenter (also called centroid) of the object. The barycenter for an object $K$ of dimension $d$ in $D$ space dimensions is given by the $D$-dimensional vector $\mathbf x_K$ defined by

    +

    Return the barycenter (also called centroid) of the object. The barycenter for an object $K$ of dimension $d$ in $D$ space dimensions is given by the $D$-dimensional vector $\mathbf x_K$ defined by

    \[
   \mathbf x_K = \frac{1}{|K|} \int_K \mathbf x \; \textrm{d}x
 \] @@ -4244,7 +4244,7 @@ |K| = \int_K \mathbf 1 \; \textrm{d}x. \]" src="form_1483.png"/>

    -

    This function assumes that $K$ is mapped by a $d$-linear function from the reference $d$-dimensional cell. Then the integrals above can be pulled back to the reference cell and evaluated exactly (if through lengthy and, compared to the center() function, expensive computations).

    +

    This function assumes that $K$ is mapped by a $d$-linear function from the reference $d$-dimensional cell. Then the integrals above can be pulled back to the reference cell and evaluated exactly (if through lengthy and, compared to the center() function, expensive computations).

    Definition at line 1597 of file tria_accessor.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChartManifold.html differs (JavaScript source, ASCII text, with very long lines (1480)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChartManifold.html 2023-11-25 15:25:51.240066129 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChartManifold.html 2023-11-25 15:25:51.240066129 +0100 @@ -204,37 +204,37 @@

    Detailed Description

    template<int dim, int spacedim = dim, int chartdim = dim>
    class ChartManifold< dim, spacedim, chartdim >

    This class describes mappings that can be expressed in terms of charts. Specifically, this class with its template arguments describes a chart of dimension chartdim, which is part of a Manifold<dim,spacedim> and is used in an object of type Triangulation<dim,spacedim>: It specializes a Manifold of dimension chartdim embedded in a manifold of dimension spacedim, for which you have explicit pull_back() and push_forward() transformations. Its use is explained in great detail in step-53.

    -

    This is a helper class which is useful when you have an explicit map from an Euclidean space of dimension chartdim to an Euclidean space of dimension spacedim which represents your manifold, i.e., when your manifold $\mathcal{M}$ can be represented by a map

    -\[ F: \mathcal{B} \subset
-R^{\text{chartdim}} \mapsto \mathcal{M} \subset R^{\text{spacedim}} \] +

    This is a helper class which is useful when you have an explicit map from an Euclidean space of dimension chartdim to an Euclidean space of dimension spacedim which represents your manifold, i.e., when your manifold $\mathcal{M}$ can be represented by a map

    +\[ F: \mathcal{B} \subset
+R^{\text{chartdim}} \mapsto \mathcal{M} \subset R^{\text{spacedim}} \]

    (the push_forward() function) and that admits the inverse transformation

    -\[ F^{-1}: \mathcal{M} \subset R^{\text{spacedim}} \mapsto \mathcal{B}
-\subset R^{\text{chartdim}} \] +\[ F^{-1}: \mathcal{M} \subset R^{\text{spacedim}} \mapsto \mathcal{B}
+\subset R^{\text{chartdim}} \]

    (the pull_back() function).

    The get_new_point() function of the ChartManifold class is implemented by calling the pull_back() method for all surrounding_points, computing their weighted average in the chartdim Euclidean space, and calling the push_forward() method with the resulting point, i.e.,

    -\[
-\mathbf x^{\text{new}} = F(\sum_i w_i F^{-1}(\mathbf x_i)).  \] +\[
+\mathbf x^{\text{new}} = F(\sum_i w_i F^{-1}(\mathbf x_i)).  \]

    Derived classes are required to implement the push_forward() and the pull_back() methods. All other functions (with the exception of the push_forward_gradient() function, see below) that are required by mappings will then be provided by this class.

    Providing function gradients

    -

    In order to compute vectors that are tangent to the manifold (for example, tangent to a surface embedded in higher dimensional space, or simply the three unit vectors of ${\mathbb R}^3$), one needs to also have access to the gradient of the push-forward function $F$. The gradient is the matrix $(\nabla F)_{ij}=\partial_j F_i$, where we take the derivative with regard to the chartdim reference coordinates on the flat Euclidean space in which $\mathcal B$ is located. In other words, at a point $\mathbf x$, $\nabla F(\mathbf x)$ is a matrix of size spacedim times chartdim.

    +

    In order to compute vectors that are tangent to the manifold (for example, tangent to a surface embedded in higher dimensional space, or simply the three unit vectors of ${\mathbb R}^3$), one needs to also have access to the gradient of the push-forward function $F$. The gradient is the matrix $(\nabla F)_{ij}=\partial_j F_i$, where we take the derivative with regard to the chartdim reference coordinates on the flat Euclidean space in which $\mathcal B$ is located. In other words, at a point $\mathbf x$, $\nabla F(\mathbf x)$ is a matrix of size spacedim times chartdim.

    Only the ChartManifold::get_tangent_vector() function uses the gradient of the push-forward, but only a subset of all finite element codes actually require the computation of tangent vectors. Consequently, while derived classes need to implement the abstract virtual push_forward() and pull_back() functions of this class, they do not need to implement the virtual push_forward_gradient() function. Rather, that function has a default implementation (and consequently is not abstract, therefore not forcing derived classes to overload it), but the default implementation clearly can not compute anything useful and therefore simply triggers and exception.

    A note on the template arguments

    The dimension arguments chartdim, dim and spacedim must satisfy the following relationships:

    dim <= spacedim
    chartdim <= spacedim

    However, there is no a priori relationship between dim and chartdim. For example, if you want to describe a mapping for an edge (a 1d object) in a 2d triangulation embedded in 3d space, you could do so by parameterizing it via a line

    -\[
+<picture><source srcset=\[
      F: [0,1] \rightarrow {\mathbb R}^3
-  \] + \]" src="form_1426.png"/>

    in which case chartdim is 1. On the other hand, there is no reason why one can't describe this as a mapping

    -\[
+<picture><source srcset=\[
      F: {\mathbb R}^3 \rightarrow {\mathbb R}^3
-  \] + \]" src="form_1427.png"/>

    -

    in such a way that the line $[0,1]\times \{0\}\times \{0\}$ happens to be mapped onto the edge in question. Here, chartdim is 3. This may seem cumbersome but satisfies the requirements of an invertible function $F$ just fine as long as it is possible to get from the edge to the pull-back space and then back again. Finally, given that we are dealing with a 2d triangulation in 3d, one will often have a mapping from, say, the 2d unit square or unit disk to the domain in 3d space, and the edge in question may simply be the mapped edge of the unit domain in 2d space. In this case, chartdim is 2.

    +

    in such a way that the line $[0,1]\times \{0\}\times \{0\}$ happens to be mapped onto the edge in question. Here, chartdim is 3. This may seem cumbersome but satisfies the requirements of an invertible function $F$ just fine as long as it is possible to get from the edge to the pull-back space and then back again. Finally, given that we are dealing with a 2d triangulation in 3d, one will often have a mapping from, say, the 2d unit square or unit disk to the domain in 3d space, and the edge in question may simply be the mapped edge of the unit domain in 2d space. In this case, chartdim is 2.

    Definition at line 902 of file manifold.h.

    Member Typedef Documentation

    @@ -589,7 +589,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -632,24 +632,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -658,11 +658,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChunkSparseMatrix.html differs (JavaScript source, ASCII text, with very long lines (699)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChunkSparseMatrix.html 2023-11-25 15:25:51.263398990 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChunkSparseMatrix.html 2023-11-25 15:25:51.263398990 +0100 @@ -1071,7 +1071,7 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    +

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by ChunkSparsityPattern::symmetrize().

    @@ -1480,7 +1480,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    @@ -1513,7 +1513,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -1570,8 +1570,8 @@
    -

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann : Numerische Mathematik)

    +

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann : Numerische Mathematik)

    @@ -1591,8 +1591,8 @@
    -

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann : Numerische Mathematik)

    +

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann : Numerische Mathematik)

    @@ -2418,7 +2418,7 @@
    -

    Return the location of entry $(i,j)$ within the val array.

    +

    Return the location of entry $(i,j)$ within the val array.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChunkSparsityPattern.html differs (JavaScript source, ASCII text, with very long lines (935)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChunkSparsityPattern.html 2023-11-25 15:25:51.286731848 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classChunkSparsityPattern.html 2023-11-25 15:25:51.286731848 +0100 @@ -1233,7 +1233,7 @@
    -

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

    +

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

    Definition at line 520 of file chunk_sparsity_pattern.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classComponentMask.html differs (JavaScript source, ASCII text, with very long lines (1097)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classComponentMask.html 2023-11-25 15:25:51.300064908 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classComponentMask.html 2023-11-25 15:25:51.300064908 +0100 @@ -137,7 +137,7 @@ std::ostream & operator<< (std::ostream &out, const ComponentMask &mask) &#href_anchor"details" id="details">

    Detailed Description

    This class represents a mask that can be used to select individual vector components of a finite element (see also this glossary entry). It will typically have as many elements as the finite element has vector components, and one can use operator[] to query whether a particular component has been selected.

    -
    Note
    A "mask" represents a data structure with true and false entries that is generally used to enable or disable an operation for a particular vector component. By this definition, disabled vector components still exist – they are simply not touched. As a consequence, when you apply a component mask for interpolating boundary values (to choose just one example) of a problem with $C$ vector components, the input argument that describes the boundary values will still have to provide $C$ components even if the mask says that we only want to interpolate a subset of these components onto the finite element space. In other words, a component mask does not represent a reduction operation; it represents a selection.
    +
    Note
    A "mask" represents a data structure with true and false entries that is generally used to enable or disable an operation for a particular vector component. By this definition, disabled vector components still exist – they are simply not touched. As a consequence, when you apply a component mask for interpolating boundary values (to choose just one example) of a problem with $C$ vector components, the input argument that describes the boundary values will still have to provide $C$ components even if the mask says that we only want to interpolate a subset of these components onto the finite element space. In other words, a component mask does not represent a reduction operation; it represents a selection.

    Objects of this kind are used in many places where one wants to restrict operations to a certain subset of components, e.g. in DoFTools::make_zero_boundary_values() or VectorTools::interpolate_boundary_values(). These objects can either be created by hand, or, simpler, by asking the finite element to generate a component mask from certain selected components using code such as this where we create a mask that only denotes the velocity components of a Stokes element (see Handling vector valued problems):

    // Q2 element for the velocities, Q1 element for the pressure
    FESystem<dim> stokes_fe (FE_Q<dim>(2), dim,
    FE_Q<dim>(1), 1);
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCompositionManifold.html differs (JavaScript source, ASCII text, with very long lines (1066)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCompositionManifold.html 2023-11-25 15:25:51.320064500 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCompositionManifold.html 2023-11-25 15:25:51.320064500 +0100 @@ -631,24 +631,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -657,11 +657,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classConvergenceTable.html differs (JavaScript source, ASCII text, with very long lines (810)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classConvergenceTable.html 2023-11-25 15:25:51.336730828 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classConvergenceTable.html 2023-11-25 15:25:51.336730828 +0100 @@ -372,14 +372,14 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

    Evaluate the convergence rates of the data column data_column_key due to the RateMode in relation to the reference column reference_column_key. Be sure that the value types of the table entries of the data column and the reference data column is a number, i.e. double, float, (unsigned) int, and so on.

    -

    As this class has no information on the space dimension upon which the reference column vs. the value column is based upon, it needs to be passed as last argument to this method. The default dimension for the reference column is 2, which is appropriate for the number of cells in 2d. If you work in 3d, set the number to 3. If the reference column is $1/h$, remember to set the dimension to 1 also when working in 3d to get correct rates.

    +

    As this class has no information on the space dimension upon which the reference column vs. the value column is based upon, it needs to be passed as last argument to this method. The default dimension for the reference column is 2, which is appropriate for the number of cells in 2d. If you work in 3d, set the number to 3. If the reference column is $1/h$, remember to set the dimension to 1 also when working in 3d to get correct rates.

    The new rate column and the data column will be merged to a supercolumn. The tex caption of the supercolumn will be (by default) the same as the one of the data column. This may be changed by using the set_tex_supercaption (...) function of the base class TableHandler.

    This method behaves in the following way:

    -

    If RateMode is reduction_rate, then the computed output is $
-\frac{e_{n-1}/k_{n-1}}{e_n/k_n}, $ where $k$ is the reference column (no dimension dependence!).

    -

    If RateMode is reduction_rate_log2, then the computed output is $ dim
-\frac{\log |e_{n-1}/e_{n}|}{\log |k_n/k_{n-1}|} $.

    -

    This is useful, for example, if we use as reference key the number of degrees of freedom or better, the number of cells. Assuming that the error is proportional to $ C (1/\sqrt{k})^r $ in 2d, then this method will produce the rate $r$ as a result. For general dimension, as described by the last parameter of this function, the formula needs to be $ C (1/\sqrt[dim]{k})^r $.

    +

    If RateMode is reduction_rate, then the computed output is $
+\frac{e_{n-1}/k_{n-1}}{e_n/k_n}, $ where $k$ is the reference column (no dimension dependence!).

    +

    If RateMode is reduction_rate_log2, then the computed output is $ dim
+\frac{\log |e_{n-1}/e_{n}|}{\log |k_n/k_{n-1}|} $.

    +

    This is useful, for example, if we use as reference key the number of degrees of freedom or better, the number of cells. Assuming that the error is proportional to $ C (1/\sqrt{k})^r $ in 2d, then this method will produce the rate $r$ as a result. For general dimension, as described by the last parameter of this function, the formula needs to be $ C (1/\sqrt[dim]{k})^r $.

    Note
    Since this function adds columns to the table after several rows have already been filled, it switches off the auto fill mode of the TableHandler base class. If you intend to add further data with auto fill, you will have to re-enable it after calling this function.

    Definition at line 23 of file convergence_table.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCylindricalManifold.html differs (JavaScript source, ASCII text, with very long lines (853)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCylindricalManifold.html 2023-11-25 15:25:51.360063689 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classCylindricalManifold.html 2023-11-25 15:25:51.360063689 +0100 @@ -423,7 +423,7 @@
    -

    Compute the cylindrical coordinates $(r, \phi, \lambda)$ for the given space point where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    +

    Compute the cylindrical coordinates $(r, \phi, \lambda)$ for the given space point where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Implements ChartManifold< dim, spacedim, chartdim >.

    @@ -455,7 +455,7 @@
    -

    Compute the Cartesian coordinates for a chart point given in cylindrical coordinates $(r, \phi, \lambda)$, where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    +

    Compute the Cartesian coordinates for a chart point given in cylindrical coordinates $(r, \phi, \lambda)$, where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Definition at line 1144 of file manifold_lib.cc.

    @@ -485,7 +485,7 @@
    -

    Compute the derivatives of the mapping from cylindrical coordinates $(r, \phi, \lambda)$ to cartesian coordinates where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    +

    Compute the derivatives of the mapping from cylindrical coordinates $(r, \phi, \lambda)$ to cartesian coordinates where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Definition at line 1164 of file manifold_lib.cc.

    @@ -682,7 +682,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -725,24 +725,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -751,11 +751,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessor.html differs (JavaScript source, ASCII text, with very long lines (1573)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessor.html 2023-11-25 15:25:51.376730014 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessor.html 2023-11-25 15:25:51.376730014 +0100 @@ -182,7 +182,7 @@

    As a consequence, DataOut is forced to take things apart into their real and imaginary parts, and both are output as separate quantities. This is the case for data that is written directly to a file by DataOut, but it is also the case for data that is first routed through DataPostprocessor objects (or objects of their derived classes): All these objects see is a collection of real values, even if the underlying solution vector was complex-valued.

    All of this has two implications:

    • If a solution vector is complex-valued, then this results in at least two input components at each evaluation point. As a consequence, the DataPostprocessor::evaluate_scalar_field() function is never called, even if the underlying finite element had only a single solution component. Instead, DataOut will always call DataPostprocessor::evaluate_vector_field().
    • -
    • Implementations of the DataPostprocessor::evaluate_vector_field() in derived classes must understand how the solution values are arranged in the DataPostprocessorInputs::Vector objects they receive as input. The rule here is: If the finite element has $N$ vector components (including the case $N=1$, i.e., a scalar element), then the inputs for complex-valued solution vectors will have $2N$ components. These first contain the values (or gradients, or Hessians) of the real parts of all solution components, and then the values (or gradients, or Hessians) of the imaginary parts of all solution components.
    • +
    • Implementations of the DataPostprocessor::evaluate_vector_field() in derived classes must understand how the solution values are arranged in the DataPostprocessorInputs::Vector objects they receive as input. The rule here is: If the finite element has $N$ vector components (including the case $N=1$, i.e., a scalar element), then the inputs for complex-valued solution vectors will have $2N$ components. These first contain the values (or gradients, or Hessians) of the real parts of all solution components, and then the values (or gradients, or Hessians) of the imaginary parts of all solution components.

    step-58 provides an example of how this class (or, rather, the derived DataPostprocessorScalar class) is used in a complex-valued situation.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessorTensor.html differs (JavaScript source, ASCII text, with very long lines (1315)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessorTensor.html 2023-11-25 15:25:51.393396343 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessorTensor.html 2023-11-25 15:25:51.393396343 +0100 @@ -254,7 +254,7 @@

    These pictures show an ellipse representing the gradient tensor at, on average, every tenth mesh point. You may want to read through the documentation of the VisIt visualization program (see https://wci.llnl.gov/simulation/computer-codes/visit/) for an interpretation of how exactly tensors are visualizated.

    -

    In elasticity, one is often interested not in the gradient of the displacement, but in the "strain", i.e., the symmetrized version of the gradient $\varepsilon=\frac 12 (\nabla u + \nabla u^T)$. This is easily facilitated with the following minor modification:

    template <int dim>
    +

    In elasticity, one is often interested not in the gradient of the displacement, but in the "strain", i.e., the symmetrized version of the gradient $\varepsilon=\frac 12 (\nabla u + \nabla u^T)$. This is easily facilitated with the following minor modification:

    template <int dim>
    class StrainPostprocessor : public DataPostprocessorTensor<dim>
    {
    public:
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessorVector.html differs (JavaScript source, ASCII text, with very long lines (1411)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessorVector.html 2023-11-25 15:25:51.410062671 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDataPostprocessorVector.html 2023-11-25 15:25:51.410062671 +0100 @@ -245,7 +245,7 @@

    In the second image, the background color corresponds to the magnitude of the gradient vector and the vector glyphs to the gradient itself. It may be surprising at first to see that from each vertex, multiple vectors originate, going in different directions. But that is because the solution is only continuous: in general, the gradient is discontinuous across edges, and so the multiple vectors originating from each vertex simply represent the differing gradients of the solution at each adjacent cell.

    -

    The output above – namely, the gradient $\nabla u$ of the solution – corresponds to the temperature gradient if one interpreted step-6 as solving a steady-state heat transfer problem. It is very small in the central part of the domain because in step-6 we are solving an equation that has a coefficient $a(\mathbf x)$ that is large in the central part and small on the outside. This can be thought as a material that conducts heat well, and consequently the temperature gradient is small. On the other hand, the "heat flux" corresponds to the quantity $a(\mathbf x) \nabla u(\mathbf x)$. For the solution of that equation, the flux should be continuous across the interface. This is easily verified by the following modification of the postprocessor:

    template <int dim>
    +

    The output above – namely, the gradient $\nabla u$ of the solution – corresponds to the temperature gradient if one interpreted step-6 as solving a steady-state heat transfer problem. It is very small in the central part of the domain because in step-6 we are solving an equation that has a coefficient $a(\mathbf x)$ that is large in the central part and small on the outside. This can be thought as a material that conducts heat well, and consequently the temperature gradient is small. On the other hand, the "heat flux" corresponds to the quantity $a(\mathbf x) \nabla u(\mathbf x)$. For the solution of that equation, the flux should be continuous across the interface. This is easily verified by the following modification of the postprocessor:

    template <int dim>
    class HeatFluxPostprocessor : public DataPostprocessorVector<dim>
    {
    public:
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html differs (JavaScript source, ASCII text, with very long lines (634)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 2023-11-25 15:25:51.426728996 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 2023-11-25 15:25:51.426728996 +0100 @@ -244,7 +244,7 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    +

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    Definition at line 492 of file derivative_approximation.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html differs (JavaScript source, ASCII text, with very long lines (632)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 2023-11-25 15:25:51.440062061 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 2023-11-25 15:25:51.440062061 +0100 @@ -239,7 +239,7 @@
    -

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    +

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    Definition at line 631 of file derivative_approximation.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeForm.html differs (JavaScript source, ASCII text, with very long lines (1196)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeForm.html 2023-11-25 15:25:51.456728389 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDerivativeForm.html 2023-11-25 15:25:51.456728389 +0100 @@ -163,24 +163,24 @@ DerivativeForm< 1, spacedim, dim, Number >&#href_anchor"memTemplItemRight" valign="bottom">transpose (const DerivativeForm< 1, dim, spacedim, Number > &DF) &#href_anchor"details" id="details">

    Detailed Description

    template<int order, int dim, int spacedim, typename Number = double>
    -class DerivativeForm< order, dim, spacedim, Number >

    This class represents the (tangential) derivatives of a function $ \mathbf F:
-{\mathbb R}^{\text{dim}} \rightarrow {\mathbb R}^{\text{spacedim}}$. Such functions are always used to map the reference dim-dimensional cell into spacedim-dimensional space. For such objects, the first derivative of the function is a linear map from ${\mathbb R}^{\text{dim}}$ to ${\mathbb
-R}^{\text{spacedim}}$, i.e., it can be represented as a matrix in ${\mathbb
-R}^{\text{spacedim}\times \text{dim}}$. This makes sense since one would represent the first derivative, $\nabla \mathbf F(\mathbf x)$ with $\mathbf
+class DerivativeForm< order, dim, spacedim, Number ></div><p>This class represents the (tangential) derivatives of a function <picture><source srcset=$ \mathbf F:
+{\mathbb R}^{\text{dim}} \rightarrow {\mathbb R}^{\text{spacedim}}$. Such functions are always used to map the reference dim-dimensional cell into spacedim-dimensional space. For such objects, the first derivative of the function is a linear map from ${\mathbb R}^{\text{dim}}$ to ${\mathbb
+R}^{\text{spacedim}}$, i.e., it can be represented as a matrix in ${\mathbb
+R}^{\text{spacedim}\times \text{dim}}$. This makes sense since one would represent the first derivative, $\nabla \mathbf F(\mathbf x)$ with $\mathbf
 x\in
-{\mathbb R}^{\text{dim}}$, in such a way that the directional derivative in direction $\mathbf d\in {\mathbb R}^{\text{dim}}$ so that

    -\begin{align*}
+{\mathbb R}^{\text{dim}}$, in such a way that the directional derivative in direction $\mathbf d\in {\mathbb R}^{\text{dim}}$ so that

    +\begin{align*}
   \nabla \mathbf F(\mathbf x) \mathbf d
   = \lim_{\varepsilon\rightarrow 0}
     \frac{\mathbf F(\mathbf x + \varepsilon \mathbf d) - \mathbf F(\mathbf
 x)}{\varepsilon},
-\end{align*} +\end{align*}" src="form_387.png"/>

    -

    i.e., one needs to be able to multiply the matrix $\nabla \mathbf F(\mathbf
-x)$ by a vector in ${\mathbb R}^{\text{dim}}$, and the result is a difference of function values, which are in ${\mathbb R}^{\text{spacedim}}$. Consequently, the matrix must be of size $\text{spacedim}\times\text{dim}$.

    -

    Similarly, the second derivative is a bilinear map from ${\mathbb
-R}^{\text{dim}} \times  {\mathbb R}^{\text{dim}}$ to ${\mathbb
-R}^{\text{spacedim}}$, which one can think of a rank-3 object of size $\text{spacedim}\times\text{dim}\times\text{dim}$.

    +

    i.e., one needs to be able to multiply the matrix $\nabla \mathbf F(\mathbf
+x)$ by a vector in ${\mathbb R}^{\text{dim}}$, and the result is a difference of function values, which are in ${\mathbb R}^{\text{spacedim}}$. Consequently, the matrix must be of size $\text{spacedim}\times\text{dim}$.

    +

    Similarly, the second derivative is a bilinear map from ${\mathbb
+R}^{\text{dim}} \times  {\mathbb R}^{\text{dim}}$ to ${\mathbb
+R}^{\text{spacedim}}$, which one can think of a rank-3 object of size $\text{spacedim}\times\text{dim}\times\text{dim}$.

    In deal.II we represent these derivatives using objects of type DerivativeForm<1,dim,spacedim,Number>, DerivativeForm<2,dim,spacedim,Number> and so on.

    Definition at line 58 of file derivative_form.h.

    @@ -392,7 +392,7 @@
    -

    Converts a DerivativeForm <order, dim, dim, Number> to Tensor<order+1, dim, Number>. In particular, if order == 1 and the derivative is the Jacobian of $\mathbf F(\mathbf x)$, then Tensor[i] = $\nabla F_i(\mathbf x)$.

    +

    Converts a DerivativeForm <order, dim, dim, Number> to Tensor<order+1, dim, Number>. In particular, if order == 1 and the derivative is the Jacobian of $\mathbf F(\mathbf x)$, then Tensor[i] = $\nabla F_i(\mathbf x)$.

    @@ -473,7 +473,7 @@
    -

    Compute the volume element associated with the jacobian of the transformation $\mathbf F$. That is to say if $DF$ is square, it computes $\det(DF)$, in case DF is not square returns $\sqrt{\det(DF^T \,DF)}$.

    +

    Compute the volume element associated with the jacobian of the transformation $\mathbf F$. That is to say if $DF$ is square, it computes $\det(DF)$, in case DF is not square returns $\sqrt{\det(DF^T \,DF)}$.

    @@ -493,7 +493,7 @@
    -

    Assuming that the current object stores the Jacobian of a mapping $\mathbf F$, then the current function computes the covariant form of the derivative, namely $(\nabla \mathbf F) {\mathbf G}^{-1}$, where $\mathbf G = (\nabla \mathbf F)^{T}(\nabla \mathbf F)$. If $\nabla \mathbf
+<p>Assuming that the current object stores the Jacobian of a mapping <picture><source srcset=$\mathbf F$, then the current function computes the covariant form of the derivative, namely $(\nabla \mathbf F) {\mathbf G}^{-1}$, where $\mathbf G = (\nabla \mathbf F)^{T}(\nabla \mathbf F)$. If $\nabla \mathbf
 F$ is a square matrix (i.e., $\mathbf F:
 {\mathbb R}^n \mapsto {\mathbb R}^n$), then this function simplifies to computing $\nabla {\mathbf F}^{-T}$.

    @@ -589,21 +589,21 @@
    -

    One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$

    -\[
+<p>One of the uses of <a class=DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$

    +\[
   \nabla \mathbf F(\mathbf x) \; \Delta \mathbf x
   \approx
   \mathbf F(\mathbf x + \Delta \mathbf x) - \mathbf F(\mathbf x).
-\] +\]" src="form_396.png"/>

    The transformation corresponds to

    -\[
+<picture><source srcset=\[
   [\text{result}]_{i_1,\dots,i_k} = i\sum_{j}
   \left[\nabla \mathbf F(\mathbf x)\right]_{i_1,\dots,i_k, j}
   \Delta x_j
-\] +\]" src="form_397.png"/>

    -

    in index notation and corresponds to $[\Delta \mathbf x] [\nabla \mathbf F(\mathbf x)]^T$ in matrix notation.

    +

    in index notation and corresponds to $[\Delta \mathbf x] [\nabla \mathbf F(\mathbf x)]^T$ in matrix notation.

    Definition at line 454 of file derivative_form.h.

    @@ -642,7 +642,7 @@
    -

    Similar to the previous apply_transformation(). Each row of the result corresponds to one of the rows of D_X transformed by grad_F, equivalent to $\mathrm{D\_X} \, \mathrm{grad\_F}^T$ in matrix notation.

    +

    Similar to the previous apply_transformation(). Each row of the result corresponds to one of the rows of D_X transformed by grad_F, equivalent to $\mathrm{D\_X} \, \mathrm{grad\_F}^T$ in matrix notation.

    Definition at line 479 of file derivative_form.h.

    @@ -681,7 +681,7 @@
    -

    Similar to the previous apply_transformation(), specialized for the case dim == spacedim where we can return a rank-2 tensor instead of the more general DerivativeForm. Each row of the result corresponds to one of the rows of D_X transformed by grad_F, equivalent to $\mathrm{D\_X} \, \mathrm{grad\_F}^T$ in matrix notation.

    +

    Similar to the previous apply_transformation(), specialized for the case dim == spacedim where we can return a rank-2 tensor instead of the more general DerivativeForm. Each row of the result corresponds to one of the rows of D_X transformed by grad_F, equivalent to $\mathrm{D\_X} \, \mathrm{grad\_F}^T$ in matrix notation.

    Definition at line 505 of file derivative_form.h.

    @@ -759,11 +759,11 @@
    -

    Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

    -\[
+<p>Similar to the previous <a class=apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

    +\[
   \mathbf u \cdot \mathbf A \mathbf v =
   \text{DF2}^{-1}(\mathbf u) \cdot \text{DF1}^{-1}(\mathbf v)
-\] +\]" src="form_404.png"/>

    Definition at line 565 of file derivative_form.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html differs (JavaScript source, ASCII text, with very long lines (1216)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html 2023-11-25 15:25:51.476727982 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html 2023-11-25 15:25:51.476727982 +0100 @@ -295,8 +295,8 @@

    The constructor for the class.

    Parameters
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    @@ -525,7 +525,7 @@
    -

    Compute the value of the residual vector field $\mathbf{r}(\mathbf{X})$.

    +

    Compute the value of the residual vector field $\mathbf{r}(\mathbf{X})$.

    Parameters
    @@ -564,9 +564,9 @@
    [out]residualA Vector object with the value for each component of the vector field evaluated at the point defined by the independent variable values.

    Compute the gradient (first derivative) of the residual vector field with respect to all independent variables, i.e.

    -\[
+<picture><source srcset=\[
   \frac{\partial\mathbf{r}(\mathbf{X})}{\partial\mathbf{X}}
-\] +\]" src="form_904.png"/>

    Parameters
    @@ -839,8 +839,8 @@

    In the rare case that the number of independent or dependent variables has changed, this can also reconfigured by passing in the appropriate arguments to the function.

    Parameters
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]clear_registered_tapesA flag that indicates the that list of registered_tapes must be cleared. If set to true then the data structure that tracks which tapes have been recorded is cleared as well. It is then expected that any preexisting tapes be re-recorded.
    @@ -1349,7 +1349,7 @@
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1396,7 +1396,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -1474,7 +1474,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1605,7 +1605,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    @@ -1807,7 +1807,7 @@
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    -

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    +

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    The gradients and Hessians of these dependent variables will be computed at the values $\mathbf{X}$ set with the set_sensitivity_value() function.

    Note
    These are stored as an ad_type so that we can use them to compute function values and directional derivatives in the case that tapeless numbers are used
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html differs (JavaScript source, ASCII text, with very long lines (1228)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 2023-11-25 15:25:51.503394103 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 2023-11-25 15:25:51.503394103 +0100 @@ -430,11 +430,11 @@

    The constructor for the class.

    Parameters
    - +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\Psi(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\Psi(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    -
    Note
    There is only one dependent variable associated with the total energy attributed to the local finite element. That is to say, this class assumes that the (local) right hand side and matrix contribution is computed from the first and second derivatives of a scalar function $\Psi(\mathbf{X})$.
    +
    Note
    There is only one dependent variable associated with the total energy attributed to the local finite element. That is to say, this class assumes that the (local) right hand side and matrix contribution is computed from the first and second derivatives of a scalar function $\Psi(\mathbf{X})$.

    Definition at line 793 of file ad_helpers.cc.

    @@ -486,7 +486,7 @@
    -

    Register the definition of the total cell energy $\Psi(\mathbf{X})$.

    +

    Register the definition of the total cell energy $\Psi(\mathbf{X})$.

    Parameters
    @@ -515,9 +515,9 @@
    [in]energyA recorded function that defines the total cell energy. This represents the single dependent variable from which both the residual and its linearization are to be computed.

    Evaluation of the total scalar energy functional for a chosen set of degree of freedom values, i.e.

    -\[
+<picture><source srcset=\[
   \Psi(\mathbf{X}) \vert_{\mathbf{X}}
-\] +\]" src="form_906.png"/>

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Returns
    The value of the energy functional at the evaluation point corresponding to a chosen set of local degree of freedom values.
    @@ -551,12 +551,12 @@
    -

    Evaluation of the residual for a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.

    -\[
+<p>Evaluation of the residual for a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the scalar function <picture><source srcset=$\Psi$ with respect to all independent variables, i.e.

    +\[
   \mathbf{r}(\mathbf{X}) =
 \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{X}}
 \Big\vert_{\mathbf{X}}
-\] +\]" src="form_907.png"/>

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Parameters
    @@ -597,13 +597,13 @@
    -

    Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the Hessian (second derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.

    -\[
+<p>Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the Hessian (second derivative) of the scalar function <picture><source srcset=$\Psi$ with respect to all independent variables, i.e.

    +\[
   \frac{\partial\mathbf{r}(\mathbf{X})}{\partial\mathbf{X}}
     =
 \frac{\partial^{2}\Psi(\mathbf{X})}{\partial\mathbf{X}
 \otimes \partial\mathbf{X}} \Big\vert_{\mathbf{X}}
-\] +\]" src="form_908.png"/>

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Parameters
    @@ -1084,8 +1084,8 @@

    In the rare case that the number of independent or dependent variables has changed, this can also reconfigured by passing in the appropriate arguments to the function.

    Parameters
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]clear_registered_tapesA flag that indicates the that list of registered_tapes must be cleared. If set to true then the data structure that tracks which tapes have been recorded is cleared as well. It is then expected that any preexisting tapes be re-recorded.
    @@ -1594,7 +1594,7 @@
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1641,7 +1641,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -1719,7 +1719,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1850,7 +1850,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    @@ -2052,7 +2052,7 @@
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    -

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    +

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    The gradients and Hessians of these dependent variables will be computed at the values $\mathbf{X}$ set with the set_sensitivity_value() function.

    Note
    These are stored as an ad_type so that we can use them to compute function values and directional derivatives in the case that tapeless numbers are used
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html differs (JavaScript source, ASCII text, with very long lines (1204)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html 2023-11-25 15:25:51.526726964 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html 2023-11-25 15:25:51.526726964 +0100 @@ -195,7 +195,7 @@

    Detailed Description

    template<enum AD::NumberTypes ADNumberTypeCode, typename ScalarType = double>
    -class Differentiation::AD::HelperBase< ADNumberTypeCode, ScalarType >

    A base helper class that facilitates the evaluation of the derivative(s) of a number of user-defined dependent variables $\mathbf{f}(\mathbf{X})$ with respect to a set of independent variables $\mathbf{X}$, that is $\dfrac{d^{i} \mathbf{f}(\mathbf{X})}{d \mathbf{X}^{i}}$.

    +class Differentiation::AD::HelperBase< ADNumberTypeCode, ScalarType >

    A base helper class that facilitates the evaluation of the derivative(s) of a number of user-defined dependent variables $\mathbf{f}(\mathbf{X})$ with respect to a set of independent variables $\mathbf{X}$, that is $\dfrac{d^{i} \mathbf{f}(\mathbf{X})}{d \mathbf{X}^{i}}$.

    This class is templated on the floating point type scalar_type of the number that we'd like to differentiate, as well as an enumeration indicating the ADNumberTypeCode . The ADNumberTypeCode dictates which auto-differentiation library is to be used, and what the nature of the underlying auto-differentiable number is. Refer to the Automatic and symbolic differentiation module for more details in this regard.

    For all of the classes derived from this base class, there are two possible ways that the code in which they are used can be structured. The one approach is effectively a subset of the other, and which might be necessary to use depends on the nature of the chosen auto-differentiable number.

    When "tapeless" numbers are employed, the most simple code structure would be of the following form:

    @@ -345,8 +345,8 @@

    The constructor for the class.

    Parameters
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    @@ -603,8 +603,8 @@

    In the rare case that the number of independent or dependent variables has changed, this can also reconfigured by passing in the appropriate arguments to the function.

    Parameters
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]clear_registered_tapesA flag that indicates the that list of registered_tapes must be cleared. If set to true then the data structure that tracks which tapes have been recorded is cleared as well. It is then expected that any preexisting tapes be re-recorded.
    @@ -1033,7 +1033,7 @@
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1080,7 +1080,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -1158,7 +1158,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1289,7 +1289,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    @@ -1491,7 +1491,7 @@
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    -

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    +

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    The gradients and Hessians of these dependent variables will be computed at the values $\mathbf{X}$ set with the set_sensitivity_value() function.

    Note
    These are stored as an ad_type so that we can use them to compute function values and directional derivatives in the case that tapeless numbers are used
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html differs (JavaScript source, ASCII text, with very long lines (1256)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html 2023-11-25 15:25:51.550059820 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html 2023-11-25 15:25:51.550059820 +0100 @@ -299,8 +299,8 @@

    The constructor for the class.

    Parameters
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    @@ -381,8 +381,8 @@

    In the rare case that the number of independent or dependent variables has changed, this can also be reconfigured by passing in the appropriate arguments to the function.

    Parameters
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]clear_registered_tapesA flag that indicates the that list of registered_tapes must be cleared. If set to true then the data structure that tracks which tapes have been recorded is cleared as well. It is then expected that any preexisting tapes be re-recorded.
    @@ -455,7 +455,7 @@
    -

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    +

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -576,7 +576,7 @@
    [in]valueA field that defines a number of independent variables. When considering taped AD numbers with branching functions, to avoid potential issues with branch switching it may be a good idea to choose these values close or equal to those that will be later evaluated and differentiated around.
    -

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    +

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -631,7 +631,7 @@
    [in]valueA field that defines the values of a number of independent variables.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1414,7 +1414,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1461,7 +1461,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -1539,7 +1539,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1670,7 +1670,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    @@ -1926,7 +1926,7 @@
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    -

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    +

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    The gradients and Hessians of these dependent variables will be computed at the values $\mathbf{X}$ set with the set_sensitivity_value() function.

    Note
    These are stored as an ad_type so that we can use them to compute function values and directional derivatives in the case that tapeless numbers are used
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html differs (JavaScript source, ASCII text, with very long lines (1248)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html 2023-11-25 15:25:51.576725946 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html 2023-11-25 15:25:51.576725946 +0100 @@ -452,8 +452,8 @@

    The constructor for the class.

    Parameters
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{r}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{r}(\mathbf{X})$, this will be the number of outputs $\mathbf{r}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{r}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{r}(\mathbf{X})$, this will be the number of outputs $\mathbf{r}$, i.e., the dimension of the image space.
    @@ -508,7 +508,7 @@
    -

    Register the definition of the cell residual vector $\mathbf{r}(\mathbf{X})$.

    +

    Register the definition of the cell residual vector $\mathbf{r}(\mathbf{X})$.

    Parameters
    @@ -549,9 +549,9 @@
    [in]residualA vector of recorded functions that defines the residual. The components of this vector represents the dependent variables.

    Evaluation of the residual for a chosen set of degree of freedom values. This corresponds to the computation of the residual vector, i.e.

    -\[
+<picture><source srcset=\[
   \mathbf{r}(\mathbf{X}) \vert_{\mathbf{X}}
-\] +\]" src="form_910.png"/>

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Parameters
    @@ -592,10 +592,10 @@
    -

    Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the residual vector $\mathbf{r}$ with respect to all independent variables, i.e.

    -\[
+<p>Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the residual vector <picture><source srcset=$\mathbf{r}$ with respect to all independent variables, i.e.

    +\[
   \frac{\partial\mathbf{r}(\mathbf{X})}{\partial\mathbf{X}}
-\] +\]" src="form_904.png"/>

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Parameters
    @@ -1076,8 +1076,8 @@

    In the rare case that the number of independent or dependent variables has changed, this can also reconfigured by passing in the appropriate arguments to the function.

    Parameters
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]clear_registered_tapesA flag that indicates the that list of registered_tapes must be cleared. If set to true then the data structure that tracks which tapes have been recorded is cleared as well. It is then expected that any preexisting tapes be re-recorded.
    @@ -1586,7 +1586,7 @@
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1633,7 +1633,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -1711,7 +1711,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1842,7 +1842,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    @@ -2044,7 +2044,7 @@
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    -

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    +

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    The gradients and Hessians of these dependent variables will be computed at the values $\mathbf{X}$ set with the set_sensitivity_value() function.

    Note
    These are stored as an ad_type so that we can use them to compute function values and directional derivatives in the case that tapeless numbers are used
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html differs (JavaScript source, ASCII text, with very long lines (1220)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html 2023-11-25 15:25:51.603392070 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html 2023-11-25 15:25:51.603392070 +0100 @@ -457,7 +457,7 @@

    The constructor for the class.

    Parameters
    - +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    @@ -512,7 +512,7 @@
    -

    Register the definition of the scalar field $\Psi(\mathbf{X})$.

    +

    Register the definition of the scalar field $\Psi(\mathbf{X})$.

    Parameters
    @@ -540,7 +540,7 @@
    [in]funcThe recorded function that defines a dependent variable.
    -

    Compute the value of the scalar field $\Psi(\mathbf{X})$ using the tape as opposed to executing the source code.

    +

    Compute the value of the scalar field $\Psi(\mathbf{X})$ using the tape as opposed to executing the source code.

    Returns
    A scalar object with the value for the scalar field evaluated at the point defined by the independent variable values.

    Definition at line 1348 of file ad_helpers.cc.

    @@ -565,9 +565,9 @@

    Compute the gradient (first derivative) of the scalar field with respect to all independent variables, i.e.

    -\[
+<picture><source srcset=\[
   \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{X}}
-\] +\]" src="form_912.png"/>

    Parameters
    @@ -598,10 +598,10 @@

    Compute the Hessian (second derivative) of the scalar field with respect to all independent variables, i.e.

    -\[
+<picture><source srcset=\[
   \frac{\partial^{2}\Psi(\mathbf{X})}{\partial\mathbf{X} \otimes
 \partial\mathbf{X}}
-\] +\]" src="form_913.png"/>

    Parameters
    @@ -651,10 +651,10 @@
    -

    Extract the function gradient for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the function gradient for a subset of independent variables <picture><source srcset=$\mathbf{A} \subset \mathbf{X}$, i.e.

    +\[
   \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}}
-\] +\]" src="form_914.png"/>

    Parameters
    @@ -710,13 +710,13 @@
    -

    Extract the function Hessian for a subset of independent variables $\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the function Hessian for a subset of independent variables <picture><source srcset=$\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    +\[
   \frac{}{\partial\mathbf{B}} \left[
 \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}} \right] =
 \frac{\partial^{2}\Psi(\mathbf{X})}{\partial\mathbf{B} \otimes
 \partial\mathbf{A}}
-\] +\]" src="form_916.png"/>

    Parameters
    @@ -769,11 +769,11 @@
    -

    Extract the function Hessian for a subset of independent variables $\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the function Hessian for a subset of independent variables <picture><source srcset=$\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    +\[
   \frac{}{\partial\mathbf{B}} \left[
 \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}} \right]
-\] +\]" src="form_917.png"/>

    This function is a specialization of the above for rank-0 tensors (scalars). This corresponds to extracting a single entry of the Hessian matrix because both extractors imply selection of just a single row or column of the matrix.

    @@ -820,11 +820,11 @@
    -

    Extract the function Hessian for a subset of independent variables $\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the function Hessian for a subset of independent variables <picture><source srcset=$\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    +\[
   \frac{}{\partial\mathbf{B}} \left[
 \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}} \right]
-\] +\]" src="form_917.png"/>

    This function is a specialization of the above for rank-4 symmetric tensors.

    @@ -875,8 +875,8 @@

    In the rare case that the number of independent or dependent variables has changed, this can also be reconfigured by passing in the appropriate arguments to the function.

    Parameters
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]clear_registered_tapesA flag that indicates the that list of registered_tapes must be cleared. If set to true then the data structure that tracks which tapes have been recorded is cleared as well. It is then expected that any preexisting tapes be re-recorded.
    @@ -965,7 +965,7 @@
    -

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    +

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -1118,7 +1118,7 @@
    [in]valueA field that defines a number of independent variables. When considering taped AD numbers with branching functions, to avoid potential issues with branch switching it may be a good idea to choose these values close or equal to those that will be later evaluated and differentiated around.
    -

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    +

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -1173,7 +1173,7 @@
    [in]valueA field that defines the values of a number of independent variables.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1221,7 +1221,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -2003,7 +2003,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -2081,7 +2081,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -2212,7 +2212,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    @@ -2468,7 +2468,7 @@
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    -

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    +

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    The gradients and Hessians of these dependent variables will be computed at the values $\mathbf{X}$ set with the set_sensitivity_value() function.

    Note
    These are stored as an ad_type so that we can use them to compute function values and directional derivatives in the case that tapeless numbers are used
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html differs (JavaScript source, ASCII text, with very long lines (1220)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html 2023-11-25 15:25:51.633391460 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html 2023-11-25 15:25:51.633391460 +0100 @@ -468,8 +468,8 @@

    The constructor for the class.

    Parameters
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    @@ -524,7 +524,7 @@
    -

    Register the definition of the vector field $\boldsymbol{\Psi}(\mathbf{X})$.

    +

    Register the definition of the vector field $\boldsymbol{\Psi}(\mathbf{X})$.

    Parameters
    @@ -567,7 +567,7 @@
    [in]funcsA vector of recorded functions that defines the dependent variables.
    -

    Register the definition of the vector field $\hat{\mathbf{g}}(\mathbf{X}) \subset \boldsymbol{\Psi}(\mathbf{X})$ that may represent a subset of the dependent variables.

    +

    Register the definition of the vector field $\hat{\mathbf{g}}(\mathbf{X}) \subset \boldsymbol{\Psi}(\mathbf{X})$ that may represent a subset of the dependent variables.

    Parameters
    @@ -598,7 +598,7 @@
    [in]funcsThe recorded functions that define a set of dependent variables.
    -

    Compute the value of the vector field $\boldsymbol{\Psi}(\mathbf{X})$.

    +

    Compute the value of the vector field $\boldsymbol{\Psi}(\mathbf{X})$.

    Parameters
    @@ -628,10 +628,10 @@
    [out]valuesA Vector object with the value for each component of the vector field evaluated at the point defined by the independent variable values. The output values vector has a length corresponding to n_dependent_variables.

    Compute the Jacobian (first derivative) of the vector field with respect to all independent variables, i.e.

    -\[
+<picture><source srcset=\[
   \mathbf{J}(\boldsymbol{\Psi})
      = \frac{\partial\boldsymbol{\Psi}(\mathbf{X})}{\partial\mathbf{X}}
-\] +\]" src="form_920.png"/>

    Parameters
    @@ -681,7 +681,7 @@
    -

    Extract the set of functions' values for a subset of dependent variables $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$.

    +

    Extract the set of functions' values for a subset of dependent variables $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$.

    Parameters
    @@ -735,13 +735,13 @@
    [in]valuesA Vector object with the value for each component of the vector field evaluated at the point defined by the independent variable values.
    -

    Extract the Jacobian of the subset of dependent functions $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the Jacobian of the subset of dependent functions <picture><source srcset=$\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    +\[
   \mathbf{J}(\mathbf{g})
      = \frac{\partial\mathbf{g}(\mathbf{X})}{\partial\mathbf{A}}
-\] +\]" src="form_922.png"/>

    -

    The first index of the Jacobian matrix $\mathbf{J}(\mathbf{g})$ relates to the dependent variables, while the second index relates to the independent variables.

    +

    The first index of the Jacobian matrix $\mathbf{J}(\mathbf{g})$ relates to the dependent variables, while the second index relates to the independent variables.

    Parameters
    @@ -793,11 +793,11 @@
    [in]jacobianThe Jacobian of the vector function with respect to all independent variables, i.e., that returned by compute_jacobian().
    -

    Extract the Jacobian of the subset of dependent functions $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the Jacobian of the subset of dependent functions <picture><source srcset=$\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    +\[
   \mathbf{J}(\mathbf{g})
      = \frac{\partial\mathbf{g}(\mathbf{X})}{\partial\mathbf{A}}
-\] +\]" src="form_922.png"/>

    This function is a specialization of the above for rank-0 tensors (scalars). This corresponds to extracting a single entry of the Jacobian matrix because both extractors imply selection of just a single row or column of the matrix.

    @@ -844,11 +844,11 @@
    -

    Extract the Jacobian of the subset of dependent functions $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    -\[
+<p>Extract the Jacobian of the subset of dependent functions <picture><source srcset=$\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    +\[
   \mathbf{J}(\mathbf{g})
      = \frac{\partial\mathbf{g}(\mathbf{X})}{\partial\mathbf{A}}
-\] +\]" src="form_922.png"/>

    This function is a specialization of the above for rank-4 symmetric tensors.

    @@ -899,8 +899,8 @@

    In the rare case that the number of independent or dependent variables has changed, this can also be reconfigured by passing in the appropriate arguments to the function.

    Parameters
    - - + +
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]n_independent_variablesThe number of independent variables that will be used in the definition of the functions that it is desired to compute the sensitivities of. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in]n_dependent_variablesThe number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{f}(\mathbf{X})$, this will be the number of outputs $\mathbf{f}$, i.e., the dimension of the image space.
    [in]clear_registered_tapesA flag that indicates the that list of registered_tapes must be cleared. If set to true then the data structure that tracks which tapes have been recorded is cleared as well. It is then expected that any preexisting tapes be re-recorded.
    @@ -989,7 +989,7 @@
    -

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    +

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -1142,7 +1142,7 @@
    [in]valueA field that defines a number of independent variables. When considering taped AD numbers with branching functions, to avoid potential issues with branch switching it may be a good idea to choose these values close or equal to those that will be later evaluated and differentiated around.
    -

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    +

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -1197,7 +1197,7 @@
    [in]valueA field that defines the values of a number of independent variables.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1245,7 +1245,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Set the actual value of the independent variable $X_{i}$.

    +

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -2027,7 +2027,7 @@
    [in]indexThe index in the vector of independent variables.
    -

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    +

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not then, for example, ADOL-C won't throw an error, but rather it might complain nonsensically during later computations or produce garbage results.
    @@ -2105,7 +2105,7 @@
    -

    Initialize an independent variable $X_{i}$.

    +

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -2236,7 +2236,7 @@
    [out]outAn auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.
    -

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    +

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
    @@ -2492,7 +2492,7 @@
    [in]indexThe index of the entry in the global list of dependent variables that this function belongs to.
    -

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    +

    The set of dependent variables $\mathbf{f}(\mathbf{X})$ of which the derivatives with respect to $\mathbf{X}$ will be computed.

    The gradients and Hessians of these dependent variables will be computed at the values $\mathbf{X}$ set with the set_sensitivity_value() function.

    Note
    These are stored as an ad_type so that we can use them to compute function values and directional derivatives in the case that tapeless numbers are used
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDiscreteTime.html differs (JavaScript source, ASCII text, with very long lines (2430)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDiscreteTime.html 2023-11-25 15:25:51.650057788 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classDiscreteTime.html 2023-11-25 15:25:51.650057788 +0100 @@ -183,7 +183,7 @@

    Since time is marched forward in a discrete manner in our simulations, we need to discuss how we increment time. During time stepping we enter two separate alternating regimes in every step.

    @@ -489,7 +489,7 @@

    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -521,7 +521,7 @@

    Return the periodicity associated with the submanifold.

    -

    For $\text{dim}=2$ and $\text{spacedim}=2$, the first coordinate is non-periodic, while the second coordinate has a periodicity of $2\pi$.

    +

    For $\text{dim}=2$ and $\text{spacedim}=2$, the first coordinate is non-periodic, while the second coordinate has a periodicity of $2\pi$.

    Definition at line 1244 of file manifold_lib.cc.

    @@ -777,7 +777,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -820,24 +820,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -846,11 +846,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluation.html differs (JavaScript source, ASCII text, with very long lines (1236)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluation.html 2023-11-25 15:25:51.856720248 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluation.html 2023-11-25 15:25:51.856720248 +0100 @@ -453,7 +453,7 @@
    unsigned int cell_index
    Definition: grid_tools.cc:1196

    Likewise, a gradient of the finite element solution represented by vector can be interpolated to the quadrature points by fe_eval.get_gradient(q). The combination of read_dof_values(), evaluate() and get_value() is similar to what FEValues::get_function_values or FEValues::get_function_gradients does, but it is in general much faster because it makes use of the tensor product, see the description of the evaluation routines below, and can do this operation for several cells at once through vectorization.

    -

    The second class of tasks done by FEEvaluation are integration tasks for right hand sides. In finite element computations, these typically consist of multiplying a quantity on quadrature points (a function value, or a field interpolated by the finite element space itself) by a set of test functions and integrating over the cell through summation of the values in each quadrature point, multiplied by the quadrature weight and the Jacobian determinant of the transformation. If a generic Function object is given and we want to compute $v_i = \int_\Omega \varphi_i f dx$, this is done by the following cell-wise integration:

    +

    The second class of tasks done by FEEvaluation are integration tasks for right hand sides. In finite element computations, these typically consist of multiplying a quantity on quadrature points (a function value, or a field interpolated by the finite element space itself) by a set of test functions and integrating over the cell through summation of the values in each quadrature point, multiplied by the quadrature weight and the Jacobian determinant of the transformation. If a generic Function object is given and we want to compute $v_i = \int_\Omega \varphi_i f dx$, this is done by the following cell-wise integration:

    Function<dim> &function = ...;
    for (unsigned int cell_index = cell_range.first;
    @@ -1072,7 +1072,7 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Constructor for the reduced functionality. This constructor is equivalent to the other one except that it makes the object use a $Q_1$ mapping (i.e., an object of type MappingQ(1)) implicitly.

    +

    Constructor for the reduced functionality. This constructor is equivalent to the other one except that it makes the object use a $Q_1$ mapping (i.e., an object of type MappingQ(1)) implicitly.

    @@ -2142,8 +2142,8 @@
    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -2437,7 +2437,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).
    @@ -2956,8 +2956,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess.html differs (JavaScript source, ASCII text, with very long lines (1677)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess.html 2023-11-25 15:25:51.900052697 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess.html 2023-11-25 15:25:51.900052697 +0100 @@ -1223,8 +1223,8 @@
    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -1518,7 +1518,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).
    @@ -2037,8 +2037,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html differs (JavaScript source, ASCII text, with very long lines (1744)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2023-11-25 15:25:51.940051883 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2023-11-25 15:25:51.940051883 +0100 @@ -973,8 +973,8 @@

    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -1753,7 +1753,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).
    @@ -2221,8 +2221,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html differs (JavaScript source, ASCII text, with very long lines (1748)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2023-11-25 15:25:51.980051072 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2023-11-25 15:25:51.980051072 +0100 @@ -946,8 +946,8 @@

    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -1653,7 +1653,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).
    @@ -2121,8 +2121,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html differs (JavaScript source, ASCII text, with very long lines (1754)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2023-11-25 15:25:52.016716990 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2023-11-25 15:25:52.016716990 +0100 @@ -865,7 +865,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    @@ -1463,8 +1463,8 @@
    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -1986,8 +1986,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationBase.html differs (JavaScript source, ASCII text, with very long lines (1401)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationBase.html 2023-11-25 15:25:52.056716176 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationBase.html 2023-11-25 15:25:52.056716176 +0100 @@ -1123,8 +1123,8 @@
    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -1346,7 +1346,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).
    @@ -1853,8 +1853,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationData.html differs (JavaScript source, ASCII text, with very long lines (811)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationData.html 2023-11-25 15:25:52.090048833 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEEvaluationData.html 2023-11-25 15:25:52.090048833 +0100 @@ -796,8 +796,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceEvaluation.html differs (JavaScript source, ASCII text, with very long lines (1540)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceEvaluation.html 2023-11-25 15:25:52.136714550 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceEvaluation.html 2023-11-25 15:25:52.136714550 +0100 @@ -1744,8 +1744,8 @@
    -

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$

    +

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
    @@ -2039,7 +2039,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).
    @@ -2558,8 +2558,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceValues.html differs (JavaScript source, ASCII text, with very long lines (878)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceValues.html 2023-11-25 15:25:52.180047000 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceValues.html 2023-11-25 15:25:52.183380267 +0100 @@ -551,7 +551,7 @@
    -

    Constructor. This constructor is equivalent to the other one except that it makes the object use a $Q_1$ mapping (i.e., an object of type MappingQ(1)) implicitly.

    +

    Constructor. This constructor is equivalent to the other one except that it makes the object use a $Q_1$ mapping (i.e., an object of type MappingQ(1)) implicitly.

    @@ -996,7 +996,7 @@

    If the shape function is vector-valued, then this returns the only non- zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_pointNumber of the quadrature point at which function is to be evaluated
    @@ -1046,7 +1046,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    componentvector component to be evaluated.
    @@ -1094,7 +1094,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    @@ -1351,17 +1351,17 @@
    -

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    +

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    -
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1406,7 +1406,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1614,16 +1614,16 @@
    -

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    +

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    +
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1668,7 +1668,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1816,11 +1816,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    +
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1870,7 +1870,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -2019,11 +2019,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    -
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -2070,7 +2070,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2270,11 +2270,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    +
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2324,7 +2324,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -2681,7 +2681,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2738,7 +2738,7 @@
    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2796,7 +2796,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2854,7 +2854,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceValuesBase.html differs (JavaScript source, ASCII text, with very long lines (1019)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceValuesBase.html 2023-11-25 15:25:52.223379453 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEFaceValuesBase.html 2023-11-25 15:25:52.223379453 +0100 @@ -675,7 +675,7 @@

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_pointNumber of the quadrature point at which function is to be evaluated
    @@ -725,7 +725,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    componentvector component to be evaluated.
    @@ -773,7 +773,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    @@ -1030,17 +1030,17 @@
    -

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    +

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    -
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1085,7 +1085,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1293,16 +1293,16 @@
    -

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    +

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    +
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1347,7 +1347,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1495,11 +1495,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    +
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1549,7 +1549,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1698,11 +1698,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    -
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1749,7 +1749,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1949,11 +1949,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    +
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2003,7 +2003,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector- valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -2360,7 +2360,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2417,7 +2417,7 @@
    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2475,7 +2475,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2533,7 +2533,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2591,7 +2591,7 @@
    -

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    +

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceValues.html differs (JavaScript source, ASCII text, with very long lines (1222)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceValues.html 2023-11-25 15:25:52.250045575 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceValues.html 2023-11-25 15:25:52.250045575 +0100 @@ -523,8 +523,8 @@
  • If the q_index and mapping_index arguments to this function are explicitly specified (rather than leaving them at their default values), then these indices will be used to select which element of the hp::QCollection and hp::MappingCollection passed to the constructor should serve as the quadrature and mapping to be used.
  • If one of these arguments is left at its default value, then the function will need to choose a quadrature and/or mapping that is appropriate for the two finite element spaces used on the two cells adjacent to the current interface. As the first choice, if the quadrature or mapping collection we are considering has only one element, then that is clearly the one that should be used.
  • If the quadrature or mapping collection have multiple elements, then we need to dig further. For quadrature objects, we can compare whether the two quadrature objects that correspond to the active_fe_index values of the two adjacent cells are identical (i.e., have quadrature points at the same locations, and have the same weights). If this is so, then it does not matter which one of the two we take, and we choose one or the other.
  • -
  • If this has still not helped, we try to find out which of the two finite element spaces on the two adjacent cells is "larger" (say, if you had used $Q_2$ and $Q_4$ elements on the two adjacent cells, then the $Q_4$ element is the larger one); the determination of which space is "larger" is made using the hp::FECollection::find_dominated_fe() function, which is not necessarily intended for this kind of query, but yields a result that serves just fine for our purposes here. We then operate on the assumption that the quadrature object associated with the "larger" of the two spaces is the appropriate one to use for the face that separates these two spaces.
      -
    • If this function returns that one of the two elements in question is dominated by the other, then presumably it is "larger" one and we take the quadrature formula and mapping that corresponds to this "larger" element is. For example, for the $Q_2$ element mentioned above, one would generally use a QGauss(3) quadrature formula, whereas for the $Q_4$ element, one would use QGauss(5). To integrate jump and average terms on the interface between cells using these two elements, QGauss(5) is appropriate. Because, typically, people will order elements in the hp::FECollection in the same order as the quadrature and mapping objects in hp::QCollection and hp::MappingCollection, this function will use the index of the "larger" element in the hp::FECollection to also index into the hp::QCollection and hp::MappingCollection to retrieve quadrature and mapping objects appropriate for the current face.
    • +
    • If this has still not helped, we try to find out which of the two finite element spaces on the two adjacent cells is "larger" (say, if you had used $Q_2$ and $Q_4$ elements on the two adjacent cells, then the $Q_4$ element is the larger one); the determination of which space is "larger" is made using the hp::FECollection::find_dominated_fe() function, which is not necessarily intended for this kind of query, but yields a result that serves just fine for our purposes here. We then operate on the assumption that the quadrature object associated with the "larger" of the two spaces is the appropriate one to use for the face that separates these two spaces.
        +
      • If this function returns that one of the two elements in question is dominated by the other, then presumably it is "larger" one and we take the quadrature formula and mapping that corresponds to this "larger" element is. For example, for the $Q_2$ element mentioned above, one would generally use a QGauss(3) quadrature formula, whereas for the $Q_4$ element, one would use QGauss(5). To integrate jump and average terms on the interface between cells using these two elements, QGauss(5) is appropriate. Because, typically, people will order elements in the hp::FECollection in the same order as the quadrature and mapping objects in hp::QCollection and hp::MappingCollection, this function will use the index of the "larger" element in the hp::FECollection to also index into the hp::QCollection and hp::MappingCollection to retrieve quadrature and mapping objects appropriate for the current face.
      • There are cases where neither element dominates the other. For example, if one uses $Q_2\times Q_1$ and $Q_1\times Q_2$ elements on neighboring cells, neither of the two spaces dominates the other – or, in the context of the current function, neither space is "larger" than the other. In that case, there is no way for the current function to determine quadrature and mapping objects associated with the two elements are the appropriate ones. If that happens, you will get an error – and the only way to avoid the error is to explicitly specify for these interfaces which quadrature and mapping objects you want to use, by providing non-default values for the q_index and mapping_index arguments to this function.
    • @@ -865,7 +865,7 @@
  • Mapped quadrature weight. This value equals the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the surface element $ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the surface element $ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1151,9 +1151,9 @@
    -

    Return the jump $\jump{u}=u_{\text{cell0}} - u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    Return the jump $\jump{u}=u_{\text{cell0}} - u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    Note that one can define the jump in different ways (the value "there" minus the value "here", or the other way around; both are used in the finite element literature). The definition here uses "value here minus value there", as seen from the first cell.

    -

    If this is a boundary face (at_boundary() returns true), then $\jump{u}=u_{\text{cell0}}$, that is "the value here (minus zero)".

    +

    If this is a boundary face (at_boundary() returns true), then $\jump{u}=u_{\text{cell0}}$, that is "the value here (minus zero)".

    Note
    The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
    @@ -1225,9 +1225,9 @@
    -

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} -
-\nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    -

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla u}=\nabla u_{\text{cell0}}$.

    +

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} -
+\nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla u}=\nabla u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
    @@ -1299,9 +1299,9 @@
    -

    Return the jump in the Hessian $\jump{\nabla^2 u} = \nabla^2
-u_{\text{cell0}} - \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    -

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}}$.

    +

    Return the jump in the Hessian $\jump{\nabla^2 u} = \nabla^2
+u_{\text{cell0}} - \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the derivative) of the shape function (singular)".
    @@ -1373,9 +1373,9 @@
    -

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
-u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    -

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}}$.

    +

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
+u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the derivative) of the shape function (singular)".
    @@ -1447,9 +1447,9 @@
    -

    Return the average $\average{u}=\frac{1}{2}u_{\text{cell0}} +
-\frac{1}{2}u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    -

    If this is a boundary face (at_boundary() returns true), then $\average{u}=u_{\text{cell0}}$.

    +

    Return the average $\average{u}=\frac{1}{2}u_{\text{cell0}} +
+\frac{1}{2}u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    If this is a boundary face (at_boundary() returns true), then $\average{u}=u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
    @@ -1521,9 +1521,9 @@
    -

    Return the average of the gradient $\average{\nabla u} = \frac{1}{2}\nabla
-u_{\text{cell0}} + \frac{1}{2} \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    -

    If this is a boundary face (at_boundary() returns true), then $\average{\nabla u}=\nabla u_{\text{cell0}}$.

    +

    Return the average of the gradient $\average{\nabla u} = \frac{1}{2}\nabla
+u_{\text{cell0}} + \frac{1}{2} \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    If this is a boundary face (at_boundary() returns true), then $\average{\nabla u}=\nabla u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values for the gradient) of the shape function (singular)".
    @@ -1595,10 +1595,10 @@
    -

    Return the average of the Hessian $\average{\nabla^2 u} =
+<p>Return the average of the Hessian <picture><source srcset=$\average{\nabla^2 u} =
 \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2
-u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    -

    If this is a boundary face (at_boundary() returns true), then $\average{\nabla^2 u}=\nabla^2 u_{\text{cell0}}$.

    +u_{\text{cell1}}$" src="form_1094.png"/> on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    +

    If this is a boundary face (at_boundary() returns true), then $\average{\nabla^2 u}=\nabla^2 u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the average (singular) of the Hessians (plural: one or two possible values for the second derivatives) of the shape function (singular)".
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html differs (JavaScript source, ASCII text, with very long lines (610)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 2023-11-25 15:25:52.270045168 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 2023-11-25 15:25:52.273378436 +0100 @@ -477,7 +477,7 @@
    -

    Return the jump $\jump{u}=u_1 - u_2$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    +

    Return the jump $\jump{u}=u_1 - u_2$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
    @@ -539,7 +539,7 @@
    -

    Return the jump of the gradient $\jump{nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    +

    Return the jump of the gradient $\jump{nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
    @@ -601,8 +601,8 @@
    -

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}}
-- \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    +

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}}
+- \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the second derivative) of the shape function (singular)".
    @@ -664,8 +664,8 @@
    -

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
-u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    +

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
+u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the third derivative) of the shape function (singular)".
    @@ -727,7 +727,7 @@
    -

    Return the average value $\average{u}=\frac{1}{2}(u_1 + u_2)$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    +

    Return the average value $\average{u}=\frac{1}{2}(u_1 + u_2)$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
    @@ -819,7 +819,7 @@
    -

    Return the average of the gradient $\average{\nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    +

    Return the average of the gradient $\average{\nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values of the derivative) of the shape function (singular)".
    @@ -881,9 +881,9 @@
    -

    Return the average of the Hessian $\average{\nabla^2 u} =
+<p>Return the average of the Hessian <picture><source srcset=$\average{\nabla^2 u} =
 \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2
-u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    +u_{\text{cell1}}$" src="form_1094.png"/> on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) in the Hessians (plural: one or two possible values of the second derivative) of the shape function (singular)".
    @@ -956,7 +956,7 @@

    Return the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The argument here_or_there selects between the value on cell 0 (here, true) and cell 1 (there, false). You can also interpret it as "upstream" (true) and "downstream" (false) as defined by the direction of the normal vector in this quadrature point. If here_or_there is true, the values from the first cell of the interface is used.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1042,7 +1042,7 @@

    Return the jump in the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1108,7 +1108,7 @@

    Return the jump in the gradients of the selected scalar components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1174,7 +1174,7 @@

    Return the jump in the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1240,7 +1240,7 @@

    Return the jump in the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1306,7 +1306,7 @@

    Return the average of the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1372,7 +1372,7 @@

    Return the average of the gradients of the selected scalar components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1438,7 +1438,7 @@

    Return the average of the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html differs (JavaScript source, ASCII text, with very long lines (610)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2023-11-25 15:25:52.293378028 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2023-11-25 15:25:52.293378028 +0100 @@ -475,7 +475,7 @@
    -

    Return the jump vector $[\mathbf{u}]=\mathbf{u_1} - \mathbf{u_2}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    +

    Return the jump vector $[\mathbf{u}]=\mathbf{u_1} - \mathbf{u_2}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
    @@ -537,8 +537,8 @@
    -

    Return the jump of the gradient (a tensor of rank 2) $\jump{\nabla
-\mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    +

    Return the jump of the gradient (a tensor of rank 2) $\jump{\nabla
+\mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
    @@ -600,8 +600,8 @@
    -

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}}
-- \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    +

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}}
+- \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the second derivative) of the shape function (singular)".
    @@ -663,8 +663,8 @@
    -

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
-u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    +

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
+u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the third derivative) of the shape function (singular)".
    @@ -726,8 +726,8 @@
    -

    Return the average vector $\average{\mathbf{u}}=\frac{1}{2}(\mathbf{u_1}
-+ \mathbf{u_2})$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    +

    Return the average vector $\average{\mathbf{u}}=\frac{1}{2}(\mathbf{u_1}
++ \mathbf{u_2})$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
    @@ -789,8 +789,8 @@
    -

    Return the average of the gradient (a tensor of rank 2) $\average{\nabla
-\mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    +

    Return the average of the gradient (a tensor of rank 2) $\average{\nabla
+\mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values of the derivative) of the shape function (singular)".
    @@ -852,9 +852,9 @@
    -

    Return the average of the Hessian $\average{\nabla^2 u} =
+<p>Return the average of the Hessian <picture><source srcset=$\average{\nabla^2 u} =
 \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2
-u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    +u_{\text{cell1}}$" src="form_1094.png"/> on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) in the Hessians (plural: one or two possible values of the second derivative) of the shape function (singular)".
    @@ -927,7 +927,7 @@

    Return the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The argument here_or_there selects between the value on cell 0 (here, true) and cell 1 (there, false). You can also interpret it as "upstream" (true) and "downstream" (false) as defined by the direction of the normal vector in this quadrature point. If here_or_there is true, the values from the first cell of the interface is used.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1013,7 +1013,7 @@

    Return the jump in the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1079,7 +1079,7 @@

    Return the jump in the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1145,7 +1145,7 @@

    Return the jump in the Hessians of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1211,7 +1211,7 @@

    Return the jump in the third derivatives of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1277,7 +1277,7 @@

    Return the average of the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1343,7 +1343,7 @@

    Return the average of the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1409,7 +1409,7 @@

    Return the average of the Hessians of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    -

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESeries_1_1Fourier.html differs (JavaScript source, ASCII text, with very long lines (1113)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESeries_1_1Fourier.html 2023-11-25 15:25:52.310044356 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESeries_1_1Fourier.html 2023-11-25 15:25:52.310044356 +0100 @@ -198,25 +198,25 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    -class FESeries::Fourier< dim, spacedim >

    A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into Fourier series on a reference element. The exponential form of the Fourier series is based on completeness and Hermitian orthogonality of the set of exponential functions $ \phi_{\bf k}({\bf x}) = \exp(2 \pi i\, {\bf k} \cdot {\bf x})$. For example in 1d the L2-orthogonality condition reads

    -\[
+class FESeries::Fourier< dim, spacedim ></div><p>A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into <a class=Fourier series on a reference element. The exponential form of the Fourier series is based on completeness and Hermitian orthogonality of the set of exponential functions $ \phi_{\bf k}({\bf x}) = \exp(2 \pi i\, {\bf k} \cdot {\bf x})$. For example in 1d the L2-orthogonality condition reads

    +\[
   \int_0^1 \phi_k(x) \phi_l^\ast(x) dx=\delta_{kl}.
-\] +\]" src="form_1176.png"/>

    -

    Note that $ \phi_{\bf k} = \phi_{-\bf k}^\ast $.

    +

    Note that $ \phi_{\bf k} = \phi_{-\bf k}^\ast $.

    The arbitrary scalar FE field on the reference element can be expanded in the complete orthogonal exponential basis as

    -\[
+<picture><source srcset=\[
    u({\bf x})
    = \sum_{\bf k} c_{\bf k} \phi_{\bf k}({\bf x}).
-\] +\]" src="form_1178.png"/>

    From the orthogonality property of the basis, it follows that

    -\[
+<picture><source srcset=\[
    c_{\bf k} =
    \int_{[0,1]^d} u({\bf x}) \phi_{\bf k}^\ast ({\bf x}) d{\bf x}\,.
-\] +\]" src="form_1179.png"/>

    -

    It is this complex-valued expansion coefficients, that are calculated by this class. Note that $ u({\bf x}) = \sum_i u_i N_i({\bf x})$, where $ N_i({\bf x}) $ are real-valued FiniteElement shape functions. Consequently $ c_{\bf k} \equiv c_{-\bf k}^\ast $ and we only need to compute $ c_{\bf k} $ for positive indices $ \bf k $ .

    +

    It is this complex-valued expansion coefficients, that are calculated by this class. Note that $ u({\bf x}) = \sum_i u_i N_i({\bf x})$, where $ N_i({\bf x}) $ are real-valued FiniteElement shape functions. Consequently $ c_{\bf k} \equiv c_{-\bf k}^\ast $ and we only need to compute $ c_{\bf k} $ for positive indices $ \bf k $ .

    Definition at line 90 of file fe_series.h.

    Member Typedef Documentation

    @@ -882,7 +882,7 @@
    -

    Angular frequencies $ 2 \pi {\bf k} $ .

    +

    Angular frequencies $ 2 \pi {\bf k} $ .

    Definition at line 196 of file fe_series.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESeries_1_1Legendre.html differs (JavaScript source, ASCII text, with very long lines (1015)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESeries_1_1Legendre.html 2023-11-25 15:25:52.330043949 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESeries_1_1Legendre.html 2023-11-25 15:25:52.326710681 +0100 @@ -195,39 +195,39 @@
    template<int dim, int spacedim = dim>
    class FESeries::Legendre< dim, spacedim >

    A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into series of Legendre functions on a reference element.

    Legendre functions are solutions to Legendre's differential equation

    -\[
+<picture><source srcset=\[
    \frac{d}{dx}\left([1-x^2] \frac{d}{dx} P_n(x)\right) +
    n[n+1] P_n(x) = 0
-\] +\]" src="form_1185.png"/>

    and can be expressed using Rodrigues' formula

    -\[
+<picture><source srcset=\[
    P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n}[x^2-1]^n.
-\] +\]" src="form_1186.png"/>

    -

    These polynomials are orthogonal with respect to the $ L^2 $ inner product on the interval $ [-1;1] $

    -\[
+<p> These polynomials are orthogonal with respect to the <picture><source srcset=$ L^2 $ inner product on the interval $ [-1;1] $

    +\[
    \int_{-1}^1 P_m(x) P_n(x) = \frac{2}{2n + 1} \delta_{mn}
-\] +\]" src="form_1189.png"/>

    -

    and are complete. A family of $ L^2 $-orthogonal polynomials on $ [0;1] $ can be constructed via

    -\[
+<p> and are complete. A family of <picture><source srcset=$ L^2 $-orthogonal polynomials on $ [0;1] $ can be constructed via

    +\[
    \widetilde P_m = \sqrt{2} P_m(2x-1).
-\] +\]" src="form_1191.png"/>

    -

    An arbitrary scalar FE field on the reference element $ [0;1] $ can be expanded in the complete orthogonal basis as

    -\[
+<p>An arbitrary scalar FE field on the reference element <picture><source srcset=$ [0;1] $ can be expanded in the complete orthogonal basis as

    +\[
    u(x)
    = \sum_{m} c_m \widetilde P_{m}(x).
-\] +\]" src="form_1192.png"/>

    From the orthogonality property of the basis, it follows that

    -\[
+<picture><source srcset=\[
    c_m = \frac{2m+1}{2}
    \int_0^1 u(x) \widetilde P_m(x) dx .
-\] +\]" src="form_1193.png"/>

    -

    This class calculates coefficients $ c_{\bf k} $ using $ dim $-dimensional Legendre polynomials constructed from $ \widetilde P_m(x) $ using tensor product rule.

    +

    This class calculates coefficients $ c_{\bf k} $ using $ dim $-dimensional Legendre polynomials constructed from $ \widetilde P_m(x) $ using tensor product rule.

    Definition at line 260 of file fe_series.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESubfaceValues.html differs (JavaScript source, ASCII text, with very long lines (825)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESubfaceValues.html 2023-11-25 15:25:52.370043135 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESubfaceValues.html 2023-11-25 15:25:52.370043135 +0100 @@ -556,7 +556,7 @@
    -

    Constructor. This constructor is equivalent to the other one except that it makes the object use a $Q_1$ mapping (i.e., an object of type MappingQ(1)) implicitly.

    +

    Constructor. This constructor is equivalent to the other one except that it makes the object use a $Q_1$ mapping (i.e., an object of type MappingQ(1)) implicitly.

    @@ -1033,7 +1033,7 @@

    If the shape function is vector-valued, then this returns the only non- zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_pointNumber of the quadrature point at which function is to be evaluated
    @@ -1083,7 +1083,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    componentvector component to be evaluated.
    @@ -1131,7 +1131,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    @@ -1388,17 +1388,17 @@
    -

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    +

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    -
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1443,7 +1443,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1651,16 +1651,16 @@
    -

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    +

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    +
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1705,7 +1705,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1853,11 +1853,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    +
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1907,7 +1907,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -2056,11 +2056,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    -
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -2107,7 +2107,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2307,11 +2307,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    +
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2361,7 +2361,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector- valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -2718,7 +2718,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2775,7 +2775,7 @@
    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2833,7 +2833,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2891,7 +2891,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESystem.html differs (JavaScript source, ASCII text, with very long lines (1148)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESystem.html 2023-11-25 15:25:52.430041914 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFESystem.html 2023-11-25 15:25:52.430041914 +0100 @@ -499,31 +499,31 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    -class FESystem< dim, spacedim >

    This class provides an interface to group several elements together into one, vector-valued element. As example, consider the Taylor-Hood element that is used for the solution of the Stokes and Navier-Stokes equations: There, the velocity (of which there are as many components as the dimension $d$ of the domain) is discretized with $Q_2$ elements and the pressure with $Q_1$ elements. Mathematically, the finite element space for the coupled problem is then often written as $V_h = Q_2^d \times Q_1$ where the exponentiation is understood to be the tensor product of spaces – i.e., in 2d, we have $V_h=Q_2\times Q_2\times Q_1$ – and tensor products lead to vectors where each component of the vector-valued function space corresponds to a scalar function in one of the $Q_2$ or $Q_1$ spaces. Using the FESystem class, this space is created using

    FESystem<dim> taylor_hood_fe (FE_Q<dim>(2)^dim, // velocity components
    +class FESystem< dim, spacedim >

    This class provides an interface to group several elements together into one, vector-valued element. As example, consider the Taylor-Hood element that is used for the solution of the Stokes and Navier-Stokes equations: There, the velocity (of which there are as many components as the dimension $d$ of the domain) is discretized with $Q_2$ elements and the pressure with $Q_1$ elements. Mathematically, the finite element space for the coupled problem is then often written as $V_h = Q_2^d \times Q_1$ where the exponentiation is understood to be the tensor product of spaces – i.e., in 2d, we have $V_h=Q_2\times Q_2\times Q_1$ – and tensor products lead to vectors where each component of the vector-valued function space corresponds to a scalar function in one of the $Q_2$ or $Q_1$ spaces. Using the FESystem class, this space is created using

    FESystem<dim> taylor_hood_fe (FE_Q<dim>(2)^dim, // velocity components
    FE_Q<dim>(1)); // pressure component
    Definition: fe_q.h:551
    -

    The creation of this element here corresponds to taking tensor-product powers of the $Q_2$ element in the first line of the list of arguments to the FESystem constructor, and then concatenation via another tensor product with the element in the second line. This kind of construction is used, for example, in the step-22 tutorial program.

    -

    Similarly, step-8 solves an elasticity equation where we need to solve for the displacement of a solid object. The displacement again has $d$ components if the domain is $d$-dimensional, and so the combined finite element is created using

    FESystem<dim> displacement_fe (FE_Q<dim>(1)^dim);
    -

    where now each (vector) component of the combined element corresponds to a $Q_1$ space.

    +

    The creation of this element here corresponds to taking tensor-product powers of the $Q_2$ element in the first line of the list of arguments to the FESystem constructor, and then concatenation via another tensor product with the element in the second line. This kind of construction is used, for example, in the step-22 tutorial program.

    +

    Similarly, step-8 solves an elasticity equation where we need to solve for the displacement of a solid object. The displacement again has $d$ components if the domain is $d$-dimensional, and so the combined finite element is created using

    FESystem<dim> displacement_fe (FE_Q<dim>(1)^dim);
    +

    where now each (vector) component of the combined element corresponds to a $Q_1$ space.

    To the outside world, FESystem objects look just like a usual finite element object, they just happen to be composed of several other finite elements that are possibly of different type. These "base elements" can themselves have multiple components and, in particular, could also be vector-valued – for example, if one of the base elements is an FESystem itself (see also below). An example is given in the documentation of namespace FETools::Compositing, when using the "tensor product" strategy.

    Vector valued elements are discussed in a number of tutorial programs, for example step-8, step-20, step-21, step-22, and in particular in the Handling vector valued problems module.

    Note
    The material presented here is also discussed in video lecture 19, video lecture 20. (All video lectures are also available here.)

    FESystem, components and blocks

    -

    An FESystem, except in the most trivial case, produces a vector-valued finite element with several components. The number of components n_components() corresponds to the dimension of the solution function in the PDE system, and correspondingly also to the number of equations your PDE system has. For example, the mixed Laplace system covered in step-20 has $d+1$ components in $d$ space dimensions: the scalar pressure and the $d$ components of the velocity vector. Similarly, the elasticity equation covered in step-8 has $d$ components in $d$ space dimensions. In general, the number of components of a FESystem element is the accumulated number of components of all base elements times their multiplicities. A bit more on components is also given in the glossary entry on components.

    +

    An FESystem, except in the most trivial case, produces a vector-valued finite element with several components. The number of components n_components() corresponds to the dimension of the solution function in the PDE system, and correspondingly also to the number of equations your PDE system has. For example, the mixed Laplace system covered in step-20 has $d+1$ components in $d$ space dimensions: the scalar pressure and the $d$ components of the velocity vector. Similarly, the elasticity equation covered in step-8 has $d$ components in $d$ space dimensions. In general, the number of components of a FESystem element is the accumulated number of components of all base elements times their multiplicities. A bit more on components is also given in the glossary entry on components.

    While the concept of components is important from the viewpoint of a partial differential equation, the finite element side looks a bit different Since not only FESystem, but also vector-valued elements like FE_RaviartThomas, have several components. The concept needed here is a block. Each block encompasses the set of degrees of freedom associated with a single base element of an FESystem, where base elements with multiplicities count multiple times. These blocks are usually addressed using the information in DoFHandler::block_info(). The number of blocks of a FESystem object is simply the sum of all multiplicities of base elements and is given by n_blocks().

    For example, the FESystem for the Taylor-Hood element for the three-dimensional Stokes problem can be built using the code

    const FE_Q<3> u(2);
    const FE_Q<3> p(1);
    FESystem<3> sys1(u,3, p,1);

    or more concisely via

    FESystem<3> sys1(FE_Q<3>(2),3,
    FE_Q<3>(1),1);
    -

    or even shorter (mimicking the mathematical notation that we are dealing with a $Q_2^3 \times Q_1$ element):

    FESystem<3> sys1(FE_Q<3>(2)^3,
    +

    or even shorter (mimicking the mathematical notation that we are dealing with a $Q_2^3 \times Q_1$ element):

    FESystem<3> sys1(FE_Q<3>(2)^3,
    FE_Q<3>(1));

    This example creates an FESystem sys1 with four components, three for the velocity components and one for the pressure, and also four blocks with the degrees of freedom of each of the velocity components and the pressure in a separate block each. The number of blocks is four since the first base element is repeated three times.

    On the other hand, a Taylor-Hood element can also be constructed using

    FESystem<3> U(u,3);
    FESystem<3> sys2(U, p);
    -

    The FESystem sys2 created here has the same four components, but the degrees of freedom are distributed into only two blocks. The first block has all velocity degrees of freedom from U, while the second block contains the pressure degrees of freedom. Note that while U itself has 3 blocks, the FESystem sys2 does not attempt to split U into its base elements but considers it a block of its own. By blocking all velocities into one system first as in sys2, we achieve the same block structure that would be generated if instead of using a $Q_2^3$ element for the velocities we had used vector-valued base elements, for instance like using a mixed discretization of Darcy's law using

    +

    The FESystem sys2 created here has the same four components, but the degrees of freedom are distributed into only two blocks. The first block has all velocity degrees of freedom from U, while the second block contains the pressure degrees of freedom. Note that while U itself has 3 blocks, the FESystem sys2 does not attempt to split U into its base elements but considers it a block of its own. By blocking all velocities into one system first as in sys2, we achieve the same block structure that would be generated if instead of using a $Q_2^3$ element for the velocities we had used vector-valued base elements, for instance like using a mixed discretization of Darcy's law using

    FE_DGQ<3> p(1);
    FESystem<3> sys3(u, p);
    @@ -4197,7 +4197,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4305,7 +4305,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4464,7 +4464,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4523,9 +4523,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValues.html differs (JavaScript source, ASCII text, with very long lines (858)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValues.html 2023-11-25 15:25:52.470041100 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValues.html 2023-11-25 15:25:52.470041100 +0100 @@ -538,7 +538,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Constructor. This constructor is equivalent to the other one except that it makes the object use a $Q_1$ mapping (i.e., an object of type MappingQ(1)) implicitly.

    +

    Constructor. This constructor is equivalent to the other one except that it makes the object use a $Q_1$ mapping (i.e., an object of type MappingQ(1)) implicitly.

    @@ -776,7 +776,7 @@

    If the shape function is vector-valued, then this returns the only non- zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_pointNumber of the quadrature point at which function is to be evaluated
    @@ -826,7 +826,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    componentvector component to be evaluated.
    @@ -874,7 +874,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    @@ -1131,17 +1131,17 @@
    -

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    +

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    -
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1186,7 +1186,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1394,16 +1394,16 @@
    -

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    +

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    +
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1448,7 +1448,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1596,11 +1596,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    +
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1650,7 +1650,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1799,11 +1799,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    -
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1850,7 +1850,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2050,11 +2050,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    +
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2104,7 +2104,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector- valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -2461,7 +2461,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2518,7 +2518,7 @@
    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2576,7 +2576,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2634,7 +2634,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesBase.html differs (JavaScript source, ASCII text, with very long lines (908)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesBase.html 2023-11-25 15:25:52.510040284 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesBase.html 2023-11-25 15:25:52.510040284 +0100 @@ -627,7 +627,7 @@

    If the shape function is vector-valued, then this returns the only non- zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_pointNumber of the quadrature point at which function is to be evaluated
    @@ -669,7 +669,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    componentvector component to be evaluated.
    @@ -709,7 +709,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    @@ -918,17 +918,17 @@
    -

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    +

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    -
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -965,7 +965,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1141,16 +1141,16 @@
    -

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    +

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    +
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1187,7 +1187,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1311,11 +1311,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    +
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1357,7 +1357,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1482,11 +1482,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    -
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1525,7 +1525,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1693,11 +1693,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    +
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1739,7 +1739,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector- valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -2022,7 +2022,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2063,7 +2063,7 @@
    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2105,7 +2105,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2147,7 +2147,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2189,7 +2189,7 @@
    -

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    +

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Scalar.html differs (JavaScript source, ASCII text, with very long lines (902)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 2023-11-25 15:25:52.533373145 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 2023-11-25 15:25:52.533373145 +0100 @@ -734,7 +734,7 @@

    Return the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected scalar component.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1545 of file fe_values.cc.

    @@ -819,7 +819,7 @@

    Return the gradients of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected scalar component.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1600 of file fe_values.cc.

    @@ -890,7 +890,7 @@

    Return the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_hessians function but it only works on the selected scalar component.

    -

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1654 of file fe_values.cc.

    @@ -961,7 +961,7 @@

    Return the Laplacians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called. The Laplacians are the trace of the Hessians.

    This function is the equivalent of the FEValuesBase::get_function_laplacians function but it only works on the selected scalar component.

    -

    The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1708 of file fe_values.cc.

    @@ -1032,7 +1032,7 @@

    Return the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_third_derivatives function but it only works on the selected scalar component.

    -

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1762 of file fe_values.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html differs (JavaScript source, ASCII text, with very long lines (902)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 2023-11-25 15:25:52.553372738 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 2023-11-25 15:25:52.553372738 +0100 @@ -154,9 +154,9 @@

    Detailed Description

    template<int dim, int spacedim>
    class FEValuesViews::SymmetricTensor< 2, dim, spacedim >

    A class representing a view to a set of (dim*dim + dim)/2 components forming a symmetric second-order tensor from a vector-valued finite element. Views are discussed in the Handling vector valued problems module.

    -

    This class allows to query the value and divergence of (components of) shape functions and solutions representing symmetric tensors. The divergence of a symmetric tensor $S_{ij}, 0\le i,j<\text{dim}$ is defined as $d_i = \sum_j \frac{\partial S_{ij}}{\partial x_j}, 0\le
-i<\text{dim}$, which due to the symmetry of the tensor is also $d_i =
-\sum_j \frac{\partial S_{ji}}{\partial x_j}$. In other words, it due to the symmetry of $S$ it does not matter whether we apply the nabla operator by row or by column to get the divergence.

    +

    This class allows to query the value and divergence of (components of) shape functions and solutions representing symmetric tensors. The divergence of a symmetric tensor $S_{ij}, 0\le i,j<\text{dim}$ is defined as $d_i = \sum_j \frac{\partial S_{ij}}{\partial x_j}, 0\le
+i<\text{dim}$, which due to the symmetry of the tensor is also $d_i =
+\sum_j \frac{\partial S_{ji}}{\partial x_j}$. In other words, it due to the symmetry of $S$ it does not matter whether we apply the nabla operator by row or by column to get the divergence.

    You get an object of this type if you apply a FEValuesExtractors::SymmetricTensor to an FEValues, FEFaceValues or FESubfaceValues object.

    Definition at line 1477 of file fe_values.h.

    @@ -507,7 +507,7 @@

    Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2263 of file fe_values.cc.

    @@ -593,7 +593,7 @@

    Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

    See the general discussion of this class for a definition of the divergence.

    -

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2317 of file fe_values.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html differs (JavaScript source, ASCII text, with very long lines (902)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 2023-11-25 15:25:52.570039063 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 2023-11-25 15:25:52.570039063 +0100 @@ -167,8 +167,8 @@

    Detailed Description

    template<int dim, int spacedim>
    class FEValuesViews::Tensor< 2, dim, spacedim >

    A class representing a view to a set of dim*dim components forming a second-order tensor from a vector-valued finite element. Views are discussed in the Handling vector valued problems module.

    -

    This class allows to query the value, gradient and divergence of (components of) shape functions and solutions representing tensors. The divergence of a tensor $T_{ij},\, 0\le i,j<\text{dim}$ is defined as $d_i =
-\sum_j \frac{\partial T_{ij}}{\partial x_j}, \, 0\le i<\text{dim}$, whereas its gradient is $G_{ijk} = \frac{\partial T_{ij}}{\partial x_k}$.

    +

    This class allows to query the value, gradient and divergence of (components of) shape functions and solutions representing tensors. The divergence of a tensor $T_{ij},\, 0\le i,j<\text{dim}$ is defined as $d_i =
+\sum_j \frac{\partial T_{ij}}{\partial x_j}, \, 0\le i<\text{dim}$, whereas its gradient is $G_{ijk} = \frac{\partial T_{ij}}{\partial x_k}$.

    You get an object of this type if you apply a FEValuesExtractors::Tensor to an FEValues, FEFaceValues or FESubfaceValues object.

    Definition at line 1815 of file fe_values.h.

    @@ -619,7 +619,7 @@

    Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2373 of file fe_values.cc.

    @@ -705,7 +705,7 @@

    Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

    See the general discussion of this class for a definition of the divergence.

    -

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2427 of file fe_values.cc.

    @@ -776,7 +776,7 @@

    Return the gradient of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    See the general discussion of this class for a definition of the gradient.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2482 of file fe_values.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Vector.html differs (JavaScript source, ASCII text, with very long lines (857)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2023-11-25 15:25:52.593371923 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2023-11-25 15:25:52.593371923 +0100 @@ -228,8 +228,8 @@
    template<int dim, int spacedim = dim>
    class FEValuesViews::Vector< dim, spacedim >

    A class representing a view to a set of spacedim components forming a vector part of a vector-valued finite element. Views are discussed in the Handling vector valued problems module.

    Note that in the current context, a vector is meant in the sense physics uses it: it has spacedim components that behave in specific ways under coordinate system transformations. Examples include velocity or displacement fields. This is opposed to how mathematics uses the word "vector" (and how we use this word in other contexts in the library, for example in the Vector class), where it really stands for a collection of numbers. An example of this latter use of the word could be the set of concentrations of chemical species in a flame; however, these are really just a collection of scalar variables, since they do not change if the coordinate system is rotated, unlike the components of a velocity vector, and consequently, this class should not be used for this context.

    -

    This class allows to query the value, gradient and divergence of (components of) shape functions and solutions representing vectors. The gradient of a vector $d_{k}, 0\le k<\text{dim}$ is defined as $S_{ij} =
-\frac{\partial d_{i}}{\partial x_j}, 0\le i,j<\text{dim}$.

    +

    This class allows to query the value, gradient and divergence of (components of) shape functions and solutions representing vectors. The gradient of a vector $d_{k}, 0\le k<\text{dim}$ is defined as $S_{ij} =
+\frac{\partial d_{i}}{\partial x_j}, 0\le i,j<\text{dim}$.

    You get an object of this type if you apply a FEValuesExtractors::Vector to an FEValues, FEFaceValues or FESubfaceValues object.

    Definition at line 675 of file fe_values.h.

    @@ -287,8 +287,8 @@

    An alias for the type of symmetrized gradients of the view this class represents. Here, for a set of dim components of the finite element, the symmetrized gradient is a SymmetricTensor<2,spacedim>.

    -

    The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf
-v^T)$.

    +

    The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf
+v^T)$.

    Definition at line 705 of file fe_values.h.

    @@ -827,8 +827,8 @@

    Return the symmetric gradient (a symmetric tensor of rank 2) of the vector component selected by this view, for the shape function and quadrature point selected by the arguments.

    -

    The symmetric gradient is defined as $\frac 12 [(\nabla \phi_i(x_q)) +
-(\nabla \phi_i(x_q))^T]$, where $\phi_i$ represents the dim components selected from the FEValuesBase object, and $x_q$ is the location of the $q$-th quadrature point.

    +

    The symmetric gradient is defined as $\frac 12 [(\nabla \phi_i(x_q)) +
+(\nabla \phi_i(x_q))^T]$, where $\phi_i$ represents the dim components selected from the FEValuesBase object, and $x_q$ is the location of the $q$-th quadrature point.

    Note
    The meaning of the arguments is as documented for the value() function.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -893,16 +893,16 @@

    Return the vector curl of the vector components selected by this view, for the shape function and quadrature point selected by the arguments. For 1d this function does not make any sense. Thus it is not implemented for spacedim=1. In 2d the curl is defined as

    -\begin{equation*}
+<picture><source srcset=\begin{equation*}
 \operatorname{curl}(u) \dealcoloneq \frac{du_2}{dx} -\frac{du_1}{dy},
-\end{equation*} +\end{equation*}" src="form_1247.png"/>

    whereas in 3d it is given by

    -\begin{equation*}
+<picture><source srcset=\begin{equation*}
 \operatorname{curl}(u) \dealcoloneq \left( \begin{array}{c}
 \frac{du_3}{dy}-\frac{du_2}{dz}\\ \frac{du_1}{dz}-\frac{du_3}{dx}\\
 \frac{du_2}{dx}-\frac{du_1}{dy} \end{array} \right).
-\end{equation*} +\end{equation*}" src="form_1248.png"/>

    Note
    The meaning of the arguments is as documented for the value() function.
    @@ -1004,7 +1004,7 @@

    Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

    -

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1818 of file fe_values.cc.

    @@ -1089,7 +1089,7 @@

    Return the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected vector components.

    -

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1872 of file fe_values.cc.

    @@ -1159,10 +1159,10 @@

    Return the symmetrized gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    -

    The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf
-v^T)$.

    +

    The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf
+v^T)$.

    Note
    There is no equivalent function such as FEValuesBase::get_function_symmetric_gradients in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.
    -

    The data type stored by the output vector must be what you get when you multiply the symmetric gradients of shape functions (i.e., symmetric_gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the symmetric gradients of shape functions (i.e., symmetric_gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1926 of file fe_values.cc.

    @@ -1233,7 +1233,7 @@

    Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

    -

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1982 of file fe_values.cc.

    @@ -1304,7 +1304,7 @@

    Return the curl of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_curls in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

    -

    The data type stored by the output vector must be what you get when you multiply the curls of shape functions (i.e., curl_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the curls of shape functions (i.e., curl_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2037 of file fe_values.cc.

    @@ -1375,7 +1375,7 @@

    Return the Hessians of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_hessians function but it only works on the selected vector components.

    -

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2091 of file fe_values.cc.

    @@ -1446,7 +1446,7 @@

    Return the Laplacians of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called. The Laplacians are the trace of the Hessians.

    This function is the equivalent of the FEValuesBase::get_function_laplacians function but it only works on the selected vector components.

    -

    The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., laplacian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., laplacian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2145 of file fe_values.cc.

    @@ -1517,7 +1517,7 @@

    Return the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_third_derivatives function but it only works on the selected scalar component.

    -

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    +

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2207 of file fe_values.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__ABF.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__ABF.html 2023-11-25 15:25:52.650037437 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__ABF.html 2023-11-25 15:25:52.650037437 +0100 @@ -763,11 +763,11 @@
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -3037,7 +3037,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3758,7 +3758,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3866,7 +3866,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4126,7 +4126,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4185,9 +4185,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__BDM.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__BDM.html 2023-11-25 15:25:52.706702951 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__BDM.html 2023-11-25 15:25:52.706702951 +0100 @@ -730,11 +730,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2980,7 +2980,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3701,7 +3701,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3809,7 +3809,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4069,7 +4069,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4128,9 +4128,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__BernardiRaugel.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__BernardiRaugel.html 2023-11-25 15:25:52.766701730 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__BernardiRaugel.html 2023-11-25 15:25:52.766701730 +0100 @@ -489,8 +489,8 @@

    Detailed Description

    template<int dim>
    class FE_BernardiRaugel< dim >

    The Bernardi-Raugel element.

    -

    This class implements the non-standard Bernardi-Raugel (BR) element that can be used as one part of a stable velocity/pressure pair for the Stokes equation. The BR element can be seen as either an enriched version of the $Q_1^d$ element with added bubble functions on each edge (in 2d) or face (in 3d), or as a reduced version of the $Q_2^d$ element. It addresses the fact that the $Q_1^d\times
-Q_0$ combination is not inf-sup stable (requiring a larger velocity space), and that the $Q_2^d\times Q_1$ combination is stable but sub-optimal since the velocity space is too large relative to the pressure space to provide additional accuracy commensurate with the cost of the large number of velocity unknowns.

    +

    This class implements the non-standard Bernardi-Raugel (BR) element that can be used as one part of a stable velocity/pressure pair for the Stokes equation. The BR element can be seen as either an enriched version of the $Q_1^d$ element with added bubble functions on each edge (in 2d) or face (in 3d), or as a reduced version of the $Q_2^d$ element. It addresses the fact that the $Q_1^d\times
+Q_0$ combination is not inf-sup stable (requiring a larger velocity space), and that the $Q_2^d\times Q_1$ combination is stable but sub-optimal since the velocity space is too large relative to the pressure space to provide additional accuracy commensurate with the cost of the large number of velocity unknowns.

    The element was introduced in the following paper:

    @article{BR85,
    author = {Christine Bernardi and Genevi{\`e}ve Raugel},
    title = {Analysis of some finite elements for the {S}tokes problem},
    @@ -505,7 +505,7 @@
    }

    Degrees of freedom

    The BR1 element has dim degrees of freedom on each vertex and 1 on each face. The shape functions are ordered by the $(Q_1)^d$ shape functions supported on each vertex, increasing according to vertex ordering on the element in GeometryInfo, then the bubble functions follow in the ordering given in PolynomialsBernardiRaugel.

    -

    This element only has 1 degree (degree $p=1$) because it yields an LBB stable pair BR1-P0 for Stokes problems which is lower degree than the Taylor-Hood element. The pair is sometimes referred to as an enriched P1-P0 element or a reduced P2-P0 element.

    +

    This element only has 1 degree (degree $p=1$) because it yields an LBB stable pair BR1-P0 for Stokes problems which is lower degree than the Taylor-Hood element. The pair is sometimes referred to as an enriched P1-P0 element or a reduced P2-P0 element.

    This element does not support hanging nodes or multigrid in the current implementation.

    Some numerical experiments have shown that this element may converge with first-order accuracy when using the BR1-Q0 pair for the mixed Laplace equation in step-20.

    @@ -637,7 +637,7 @@

    Constructor for the Bernardi-Raugel element of degree p. The only supported degree is 1.

      -
    • p: The degree of the element $p=1$ for $BR_1$.
    • +
    • p: The degree of the element $p=1$ for $BR_1$.

    Definition at line 44 of file fe_bernardi_raugel.cc.

    @@ -741,11 +741,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2955,7 +2955,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3676,7 +3676,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3784,7 +3784,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4044,7 +4044,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4103,9 +4103,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Bernstein.html differs (JavaScript source, ASCII text, with very long lines (759)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Bernstein.html 2023-11-25 15:25:52.820033975 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Bernstein.html 2023-11-25 15:25:52.823367243 +0100 @@ -2633,17 +2633,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2687,21 +2687,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3903,7 +3903,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4011,7 +4011,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4240,7 +4240,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4299,9 +4299,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4346,11 +4346,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGBDM.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGBDM.html 2023-11-25 15:25:52.880032754 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGBDM.html 2023-11-25 15:25:52.880032754 +0100 @@ -2851,7 +2851,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3572,7 +3572,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3680,7 +3680,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3940,7 +3940,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -3999,9 +3999,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4046,11 +4046,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGNedelec.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGNedelec.html 2023-11-25 15:25:52.936698268 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGNedelec.html 2023-11-25 15:25:52.936698268 +0100 @@ -2851,7 +2851,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3572,7 +3572,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3680,7 +3680,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3940,7 +3940,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -3999,9 +3999,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4046,11 +4046,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGP.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGP.html 2023-11-25 15:25:52.993363783 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGP.html 2023-11-25 15:25:52.993363783 +0100 @@ -481,14 +481,14 @@
    This class is only partially implemented for the codimension one case (spacedim != dim ), since no passage of information between meshes of different refinement level is possible because the embedding and projection matrices are not computed in the class constructor.

    Transformation properties

    -

    It is worth noting that under a (bi-, tri-)linear mapping, the space described by this element does not contain $P(k)$, even if we use a basis of polynomials of degree $k$. Consequently, for example, on meshes with non-affine cells, a linear function can not be exactly represented by elements of type FE_DGP(1) or FE_DGPMonomial(1).

    +

    It is worth noting that under a (bi-, tri-)linear mapping, the space described by this element does not contain $P(k)$, even if we use a basis of polynomials of degree $k$. Consequently, for example, on meshes with non-affine cells, a linear function can not be exactly represented by elements of type FE_DGP(1) or FE_DGPMonomial(1).

    This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

    -

    For this cell, a bilinear transformation $F$ produces the relations $x=\hat
-x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat
+<p>For this cell, a bilinear transformation <picture><source srcset=$F$ produces the relations $x=\hat
+x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat
 y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$ are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.

    -

    For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

    +

    For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

    Visualization of shape functions

    In 2d, the shape functions of this element look as follows.

    $P_0$ element

    @@ -504,7 +504,7 @@

    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    -

    $P_1$ element

    +

    $P_1$ element

    - @@ -528,7 +528,7 @@

    -
    @@ -516,9 +516,9 @@

    $P_1$ element, shape function 0

    +

    $P_1$ element, shape function 0

    -

    $P_1$ element, shape function 1

    +

    $P_1$ element, shape function 1

    $P_1$ element, shape function 2

    +

    $P_1$ element, shape function 2

    @@ -2573,17 +2573,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2627,21 +2627,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3301,7 +3301,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -4022,7 +4022,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4130,7 +4130,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4359,7 +4359,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4418,9 +4418,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4465,11 +4465,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGPMonomial.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGPMonomial.html 2023-11-25 15:25:53.056695828 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGPMonomial.html 2023-11-25 15:25:53.056695828 +0100 @@ -491,14 +491,14 @@

    The basis functions for this element are chosen to be the monomials listed above. Note that this is the main difference to the FE_DGP class that uses a set of polynomials of complete degree p that form a Legendre basis on the unit square. Thus, there, the mass matrix is diagonal, if the grid cells are parallelograms. The basis here does not have this property; however, it is simpler to compute. On the other hand, this element has the additional disadvantage that the local cell matrices usually have a worse condition number than the ones originating from the FE_DGP element.

    This class is not implemented for the codimension one case (spacedim != dim).

    Transformation properties

    -

    It is worth noting that under a (bi-, tri-)linear mapping, the space described by this element does not contain $P(k)$, even if we use a basis of polynomials of degree $k$. Consequently, for example, on meshes with non-affine cells, a linear function can not be exactly represented by elements of type FE_DGP(1) or FE_DGPMonomial(1).

    +

    It is worth noting that under a (bi-, tri-)linear mapping, the space described by this element does not contain $P(k)$, even if we use a basis of polynomials of degree $k$. Consequently, for example, on meshes with non-affine cells, a linear function can not be exactly represented by elements of type FE_DGP(1) or FE_DGPMonomial(1).

    This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

    -

    For this cell, a bilinear transformation $F$ produces the relations $x=\hat
-x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat
+<p>For this cell, a bilinear transformation <picture><source srcset=$F$ produces the relations $x=\hat
+x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat
 y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$ are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.

    -

    For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

    +

    For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

    Visualization of shape functions

    In 2d, the shape functions of this element look as follows.

    $P_0$ element

    @@ -514,7 +514,7 @@

    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    -

    $P_1$ element

    +

    $P_1$ element

    - @@ -538,7 +538,7 @@

    -
    @@ -526,9 +526,9 @@

    $P_1$ element, shape function 0

    +

    $P_1$ element, shape function 0

    -

    $P_1$ element, shape function 1

    +

    $P_1$ element, shape function 1

    $P_1$ element, shape function 2

    +

    $P_1$ element, shape function 2

    @@ -2727,17 +2727,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2781,21 +2781,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3689,7 +3689,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -4410,7 +4410,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4518,7 +4518,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4778,7 +4778,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4837,9 +4837,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4884,11 +4884,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGPNonparametric.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGPNonparametric.html 2023-11-25 15:25:53.113361342 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGPNonparametric.html 2023-11-25 15:25:53.113361342 +0100 @@ -475,7 +475,7 @@
    template<int dim, int spacedim = dim>
    class FE_DGPNonparametric< dim, spacedim >

    Discontinuous finite elements evaluated at the mapped quadrature points.

    Warning: this class does not work properly, yet. Don't use it!

    -

    This finite element implements complete polynomial spaces, that is, $d$-dimensional polynomials of order $k$.

    +

    This finite element implements complete polynomial spaces, that is, $d$-dimensional polynomials of order $k$.

    The polynomials are not mapped. Therefore, they are constant, linear, quadratic, etc. on any grid cell.

    Since the polynomials are evaluated at the quadrature points of the actual grid cell, no grid transfer and interpolation matrices are available.

    The purpose of this class is experimental, therefore the implementation will remain incomplete.

    @@ -495,7 +495,7 @@

    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    -

    $P_1$ element

    +

    $P_1$ element

    - @@ -519,7 +519,7 @@

    -
    @@ -507,9 +507,9 @@

    $P_1$ element, shape function 0

    +

    $P_1$ element, shape function 0

    -

    $P_1$ element, shape function 1

    +

    $P_1$ element, shape function 1

    $P_1$ element, shape function 2

    +

    $P_1$ element, shape function 2

    @@ -2753,7 +2753,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3474,7 +3474,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3582,7 +3582,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3842,7 +3842,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -3901,9 +3901,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3948,11 +3948,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQ.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQ.html 2023-11-25 15:25:53.170026853 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQ.html 2023-11-25 15:25:53.170026853 +0100 @@ -505,7 +505,7 @@ *

    with node 13 being placed in the interior of the hex.

    Note, however, that these are just the Lagrange interpolation points of the shape functions. Even though they may physically be on the boundary of the cell, they are logically in the interior since there are no continuity requirements for these shape functions across cell boundaries. While discontinuous, when restricted to a single cell the shape functions of this element are exactly the same as those of the FE_Q element where they are shown visually.

    Unit support point distribution and conditioning of interpolation

    -

    When constructing an FE_DGQ element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points xi are those points where the jth Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

    +

    When constructing an FE_DGQ element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points xi are those points where the jth Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

    The Gauss-Lobatto points in 1d include the end points 0 and +1 of the unit interval. The interior points are shifted towards the end points, which gives a denser point distribution close to the element boundary.

    Definition at line 112 of file fe_dgq.h.

    @@ -2547,17 +2547,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2601,21 +2601,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3168,7 +3168,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3889,7 +3889,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3997,7 +3997,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4226,7 +4226,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4285,9 +4285,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQArbitraryNodes.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQArbitraryNodes.html 2023-11-25 15:25:53.230025634 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQArbitraryNodes.html 2023-11-25 15:25:53.223359103 +0100 @@ -2475,17 +2475,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2529,21 +2529,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3096,7 +3096,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3817,7 +3817,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3925,7 +3925,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4154,7 +4154,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4213,9 +4213,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQHermite.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQHermite.html 2023-11-25 15:25:53.283357881 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQHermite.html 2023-11-25 15:25:53.283357881 +0100 @@ -2479,17 +2479,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2533,21 +2533,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3100,7 +3100,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3821,7 +3821,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3929,7 +3929,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4158,7 +4158,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4217,9 +4217,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQLegendre.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQLegendre.html 2023-11-25 15:25:53.336690130 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGQLegendre.html 2023-11-25 15:25:53.336690130 +0100 @@ -2477,17 +2477,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2531,21 +2531,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3098,7 +3098,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3819,7 +3819,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3927,7 +3927,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4156,7 +4156,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4215,9 +4215,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGRaviartThomas.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGRaviartThomas.html 2023-11-25 15:25:53.393355641 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGRaviartThomas.html 2023-11-25 15:25:53.393355641 +0100 @@ -2851,7 +2851,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3572,7 +3572,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3680,7 +3680,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3940,7 +3940,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -3999,9 +3999,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4046,11 +4046,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGVector.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGVector.html 2023-11-25 15:25:53.453354423 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__DGVector.html 2023-11-25 15:25:53.453354423 +0100 @@ -2869,7 +2869,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3590,7 +3590,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3698,7 +3698,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3958,7 +3958,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4017,9 +4017,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4064,11 +4064,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Enriched.html differs (JavaScript source, ASCII text, with very long lines (864)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Enriched.html 2023-11-25 15:25:53.510019933 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Enriched.html 2023-11-25 15:25:53.510019933 +0100 @@ -483,12 +483,12 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    class FE_Enriched< dim, spacedim >

    Implementation of a partition of unity finite element method (PUM) by Babuska and Melenk which enriches a standard finite element with an enrichment function multiplied with another (usually linear) finite element:

    -\[
+<picture><source srcset=\[
 U(\mathbf x) = \sum_i N_i(\mathbf x) U_i + \sum_j N_j(\mathbf x) \sum_k
-F_k(\mathbf x) U_{jk} \] +F_k(\mathbf x) U_{jk} \]" src="form_1077.png"/>

    -

    where $ N_i(\mathbf x) $ and $ N_j(\mathbf x) $ are the underlying finite elements (including the mapping from the isoparametric element to the real element); $ F_k(\mathbf x) $ are the scalar enrichment functions in real space (e.g. $ 1/r $, $ \exp(-r) $, etc); $ U_i $ and $
-U_{jk} $ are the standard and enriched DoFs. This allows to include in the finite element space a priori knowledge about the partial differential equation being solved which in turn improves the local approximation properties of the spaces. This can be useful for highly oscillatory solutions, problems with domain corners or on unbounded domains or sudden changes of boundary conditions. PUM method uses finite element spaces which satisfy the partition of unity property (e.g. FE_Q). Among other properties this makes the resulting space to reproduce enrichment functions exactly.

    +

    where $ N_i(\mathbf x) $ and $ N_j(\mathbf x) $ are the underlying finite elements (including the mapping from the isoparametric element to the real element); $ F_k(\mathbf x) $ are the scalar enrichment functions in real space (e.g. $ 1/r $, $ \exp(-r) $, etc); $ U_i $ and $
+U_{jk} $ are the standard and enriched DoFs. This allows to include in the finite element space a priori knowledge about the partial differential equation being solved which in turn improves the local approximation properties of the spaces. This can be useful for highly oscillatory solutions, problems with domain corners or on unbounded domains or sudden changes of boundary conditions. PUM method uses finite element spaces which satisfy the partition of unity property (e.g. FE_Q). Among other properties this makes the resulting space to reproduce enrichment functions exactly.

    The simplest constructor of this class takes two finite element objects and an enrichment function to be used. For example

    @@ -496,7 +496,7 @@
    Definition: fe_q.h:551

    In this case, standard DoFs are distributed by FE_Q<dim>(2), whereas enriched DoFs are coming from a single finite element FE_Q<dim>(1) used with a single enrichment function function. In this case, the total number of DoFs on the enriched element is the sum of DoFs from FE_Q<dim>(2) and FE_Q<dim>(1).

    -

    As an example of an enrichment function, consider $ \exp(-x) $, which leads to the following shape functions on the unit element:

    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    +

    As an example of an enrichment function, consider $ \exp(-x) $, which leads to the following shape functions on the unit element:

    @@ -509,7 +509,7 @@
    1d element, base and enriched shape functions. enriched shape function corresponding to the central vertex.

    Note that evaluation of gradients (hessians) of the enriched shape functions or the finite element field requires evaluation of gradients (gradients and hessians) of the enrichment functions:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   U(\mathbf x)
     &= \sum_i N_i(\mathbf x) U_i
     + \sum_{j,k} N_j(\mathbf x) F_k(\mathbf x) U_{jk} \\
@@ -523,10 +523,10 @@
 F_k(\mathbf x) + \mathbf \nabla F_k(\mathbf x) \mathbf \nabla N_j(\mathbf x)
 + \mathbf \nabla N_j(\mathbf x) \mathbf \nabla F_k(\mathbf x) + N_j(\mathbf
 x) \mathbf \nabla \mathbf \nabla F_k(\mathbf x) \right] U_{jk}
-\end{align*} +\end{align*}" src="form_1086.png"/>

    Using enriched and non-enriched FEs together

    -

    In most applications it is beneficial to introduce enrichments only in some part of the domain (e.g. around a crack tip) and use standard FE (e.g. FE_Q) elsewhere. This can be achieved by using the hp-finite element framework in deal.II that allows for the use of different elements on different cells. To make the resulting space $C^0$ continuous, it is then necessary for the DoFHandler class and DoFTools::make_hanging_node_constraints() function to be able to figure out what to do at the interface between enriched and non-enriched cells. Specifically, we want the degrees of freedom corresponding to enriched shape functions to be zero at these interfaces. These classes and functions can not to do this automatically, but the effect can be achieved by using not just a regular FE_Q on cells without enrichment, but to wrap the FE_Q into an FE_Enriched object without actually enriching it. This can be done as follows:

    FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1));
    +

    In most applications it is beneficial to introduce enrichments only in some part of the domain (e.g. around a crack tip) and use standard FE (e.g. FE_Q) elsewhere. This can be achieved by using the hp-finite element framework in deal.II that allows for the use of different elements on different cells. To make the resulting space $C^0$ continuous, it is then necessary for the DoFHandler class and DoFTools::make_hanging_node_constraints() function to be able to figure out what to do at the interface between enriched and non-enriched cells. Specifically, we want the degrees of freedom corresponding to enriched shape functions to be zero at these interfaces. These classes and functions can not to do this automatically, but the effect can be achieved by using not just a regular FE_Q on cells without enrichment, but to wrap the FE_Q into an FE_Enriched object without actually enriching it. This can be done as follows:

    FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1));

    This constructor is equivalent to calling

    FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1),
    FE_Nothing<dim>(1,true),
    nullptr);
    @@ -2929,7 +2929,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3618,7 +3618,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3726,7 +3726,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3986,7 +3986,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4045,9 +4045,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4092,11 +4092,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceP.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceP.html 2023-11-25 15:25:53.566685447 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceP.html 2023-11-25 15:25:53.566685447 +0100 @@ -2936,7 +2936,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3657,7 +3657,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3765,7 +3765,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3994,7 +3994,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4053,9 +4053,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4100,11 +4100,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 2023-11-25 15:25:53.620017697 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 2023-11-25 15:25:53.620017697 +0100 @@ -3140,7 +3140,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    @@ -3811,7 +3811,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3913,7 +3913,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4124,7 +4124,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4179,9 +4179,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4224,11 +4224,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceQ.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceQ.html 2023-11-25 15:25:53.676683211 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceQ.html 2023-11-25 15:25:53.683349739 +0100 @@ -2987,7 +2987,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3708,7 +3708,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3816,7 +3816,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4045,7 +4045,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4104,9 +4104,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 2023-11-25 15:25:53.736681990 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 2023-11-25 15:25:53.736681990 +0100 @@ -2606,7 +2606,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    @@ -3277,7 +3277,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3379,7 +3379,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3590,7 +3590,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -3645,9 +3645,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3690,11 +3690,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Nedelec.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Nedelec.html 2023-11-25 15:25:53.796680768 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Nedelec.html 2023-11-25 15:25:53.796680768 +0100 @@ -507,12 +507,12 @@

    Detailed Description

    template<int dim>
    class FE_Nedelec< dim >
    Warning
    Several aspects of the implementation are experimental. For the moment, it is safe to use the element on globally refined meshes with consistent orientation of faces. See the todo entries below for more detailed caveats.
    -

    Implementation of Nédélec elements. The Nédélec space is designed to solve problems in which the solution only lives in the space $H^\text{curl}=\{ {\mathbf u} \in L_2: \text{curl}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose curl is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the Maxwell equations and corresponding simplifications, such as the reduced version of the Maxwell equation that only involves the electric field $\mathbf E$ which has to satisfy the equation $\text{curl}\, \text{curl}\, {\mathbf E} = 0$ in the time independent case when no currents are present, or the equation $\text{curl}\,\text{curl}\,{\mathbf A} = 4\pi{\mathbf j}$ that the magnetic vector potential $\mathbf A$ has to satisfy in the time independent case.

    -

    The defining characteristic of functions in $H^\text{curl}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the tangential component(s) of the vector field must be continuous across the line (or surface) even though the normal component may not be. As a consequence, the Nédélec element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the tangential component(s) of the vector field represented by each shape function are continuous across the faces of cells.

    +

    Implementation of Nédélec elements. The Nédélec space is designed to solve problems in which the solution only lives in the space $H^\text{curl}=\{ {\mathbf u} \in L_2: \text{curl}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose curl is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the Maxwell equations and corresponding simplifications, such as the reduced version of the Maxwell equation that only involves the electric field $\mathbf E$ which has to satisfy the equation $\text{curl}\, \text{curl}\, {\mathbf E} = 0$ in the time independent case when no currents are present, or the equation $\text{curl}\,\text{curl}\,{\mathbf A} = 4\pi{\mathbf j}$ that the magnetic vector potential $\mathbf A$ has to satisfy in the time independent case.

    +

    The defining characteristic of functions in $H^\text{curl}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the tangential component(s) of the vector field must be continuous across the line (or surface) even though the normal component may not be. As a consequence, the Nédélec element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the tangential component(s) of the vector field represented by each shape function are continuous across the faces of cells.

    Other properties of the Nédélec element are that (i) it is not a primitive element ; (ii) the shape functions are defined so that certain integrals over the faces are either zero or one, rather than the common case of certain point values being either zero or one.

    We follow the commonly used – though confusing – definition of the "degree" of Nédélec elements. Specifically, the "degree" of the element denotes the polynomial degree of the largest complete polynomial subspace contained in the finite element space, even if the space may contain shape functions of higher polynomial degree. The lowest order element is consequently FE_Nedelec(0), i.e., the Raviart-Thomas element "of degree zero", even though the functions of this space are in general polynomials of degree one in each variable. This choice of "degree" implies that the approximation order of the function itself is degree+1, as with usual polynomial spaces. The numbering so chosen implies the sequence

    \[
   Q_{k+1}
   \stackrel{\text{grad}}{\rightarrow}
   \text{Nedelec}_k
   \stackrel{\text{curl}}{\rightarrow}
   \text{RaviartThomas}_k
   \stackrel{\text{div}}{\rightarrow}
   DGQ_{k}
    \]

    Note that this follows the convention of Brezzi and Raviart, though not the one used in the original paper by Nédélec.

    This class is not implemented for the codimension one case (spacedim != dim).

    @@ -1421,11 +1421,11 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -3801,7 +3801,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -4522,7 +4522,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4630,7 +4630,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4859,7 +4859,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4918,9 +4918,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__NedelecSZ.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__NedelecSZ.html 2023-11-25 15:25:53.850013017 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__NedelecSZ.html 2023-11-25 15:25:53.850013017 +0100 @@ -2489,7 +2489,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    @@ -3160,7 +3160,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3262,7 +3262,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3500,7 +3500,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -3555,9 +3555,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3600,11 +3600,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html differs (JavaScript source, ASCII text, with very long lines (787)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 2023-11-25 15:25:53.863346078 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 2023-11-25 15:25:53.863346078 +0100 @@ -146,9 +146,9 @@ class FE_NedelecSZ< dim, spacedim >::InternalData

    Derived Internal data which is used to store cell-independent data. Note that due to the nature of this element, a number of useful pre-computed quantities are stored for the computation of cell-dependent shape functions.

    The main quantities which are stored are associated with edge and face parameterizations. These are:

    • -$\lambda_{i}$ - trilinear function, equal to one at the $i$-th vertex and zero at all other vertices.
    • +$\lambda_{i}$ - trilinear function, equal to one at the $i$-th vertex and zero at all other vertices.
    • -$\sigma_{i}$ - linear functional associated with the $i$-th vertex.
    • +$\sigma_{i}$ - linear functional associated with the $i$-th vertex.

    The definitions of these functionals, as well as the edge and face parameterizations and edge and face extension parameters, can be found on page 82 of Zaglmayr's thesis. The details of the definition of the globally-defined edge and face orientations can be found on page 67.

    @@ -279,9 +279,9 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Storage for all possible edge parameterization between vertices. These are required in the computation of edge- and face-based DoFs, which are cell-dependent.

    -

    The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma_{j}$.

    -

    sigma_imj_values[q][i][j] stores the value of the edge parameterization connected by vertices $i$ and $j$ at the q-th quadrature point.

    -

    Note that not all of the $i$ and $j$ combinations result in valid edges on the hexahedral cell, but they are computed in this fashion for use with non-standard edge and face orientations.

    +

    The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma_{j}$.

    +

    sigma_imj_values[q][i][j] stores the value of the edge parameterization connected by vertices $i$ and $j$ at the q-th quadrature point.

    +

    Note that not all of the $i$ and $j$ combinations result in valid edges on the hexahedral cell, but they are computed in this fashion for use with non-standard edge and face orientations.

    Definition at line 287 of file fe_nedelec_sz.h.

    @@ -301,8 +301,8 @@

    Storage for gradients of all possible edge parameterizations between vertices. These are required in the computation of edge- and face-based DoFs, which are cell-dependent. Note that the components of the gradient are constant.

    -

    The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma_{j}$.

    -

    sigma_imj_grads[i][j][d] stores the gradient of the edge parameterization connected by vertices $i$ and $j$ in component $d$.

    +

    The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma_{j}$.

    +

    sigma_imj_grads[i][j][d] stores the gradient of the edge parameterization connected by vertices $i$ and $j$ in component $d$.

    Note that the gradient of the edge parameterization is constant on an edge, so we do not need to store it at every quadrature point.

    Definition at line 304 of file fe_nedelec_sz.h.

    @@ -365,10 +365,10 @@

    Storage for edge extension parameters at quadrature points. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    -

    The edge extension parameter of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\lambda_{E} = \lambda_{i} +
-\lambda_{j}$.

    -

    Note that under this definition, the values of $\lambda_{E}$ do not change with the orientation of the edge.

    -

    edge_lambda_values[m][q] stores the edge extension parameter value at the $q$-th quadrature point on edge $m$.

    +

    The edge extension parameter of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\lambda_{E} = \lambda_{i} +
+\lambda_{j}$.

    +

    Note that under this definition, the values of $\lambda_{E}$ do not change with the orientation of the edge.

    +

    edge_lambda_values[m][q] stores the edge extension parameter value at the $q$-th quadrature point on edge $m$.

    Definition at line 347 of file fe_nedelec_sz.h.

    @@ -388,7 +388,7 @@

    Storage for gradients of edge extension parameters in 2d. In this case they are constant. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    -

    edge_lambda_grads_2d[m][d] stores the gradient of the edge extension parameter for component $d$ on edge $m$.

    +

    edge_lambda_grads_2d[m][d] stores the gradient of the edge extension parameter for component $d$ on edge $m$.

    Definition at line 358 of file fe_nedelec_sz.h.

    @@ -408,7 +408,7 @@

    Storage for gradients of edge extension parameters in 3d. In this case they are non-constant. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    -

    edge_lambda_grads_3d[m][q][d] stores the gradient of the edge extension parameter for component $d$ at the $q$-th quadrature point on edge m.

    +

    edge_lambda_grads_3d[m][q][d] stores the gradient of the edge extension parameter for component $d$ at the $q$-th quadrature point on edge m.

    Definition at line 369 of file fe_nedelec_sz.h.

    @@ -428,7 +428,7 @@

    Storage for 2nd derivatives of edge extension parameters in 3d, which are constant across the cell. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    -

    edge_lambda_gradgrads_3d[m][d1][d2] stores the 2nd derivatives of the edge extension parameters with respect to components d1 and d2 on edge $m$.

    +

    edge_lambda_gradgrads_3d[m][d1][d2] stores the 2nd derivatives of the edge extension parameters with respect to components d1 and d2 on edge $m$.

    Definition at line 381 of file fe_nedelec_sz.h.

    @@ -448,10 +448,10 @@

    Storage for the face extension parameters. These are stored for the 6 faces such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    -

    The face extension parameter of a face, F, defined by the vertices v1, v2, v3, v4 is given by $\lambda_{F} = \lambda_{v1} + \lambda_{v2} + \lambda_{v3} +
-\lambda_{v4}$.

    -

    Note that under this definition, the values of $\lambda_{F}$ do not change with the orientation of the face.

    -

    face_lambda_values[m][q] stores the face extension parameter value at the $q$-th quadrature point on face $m$.

    +

    The face extension parameter of a face, F, defined by the vertices v1, v2, v3, v4 is given by $\lambda_{F} = \lambda_{v1} + \lambda_{v2} + \lambda_{v3} +
+\lambda_{v4}$.

    +

    Note that under this definition, the values of $\lambda_{F}$ do not change with the orientation of the face.

    +

    face_lambda_values[m][q] stores the face extension parameter value at the $q$-th quadrature point on face $m$.

    Definition at line 399 of file fe_nedelec_sz.h.

    @@ -471,7 +471,7 @@

    Storage for gradients of face extension parameters. These are stored for the 6 faces such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    -

    face_lambda_grads[m][d] stores the gradient of the face extension parameters for component $d$ on face $m$.

    +

    face_lambda_grads[m][d] stores the gradient of the face extension parameters for component $d$ on face $m$.

    Definition at line 409 of file fe_nedelec_sz.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Nothing.html differs (JavaScript source, ASCII text, with very long lines (1483)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Nothing.html 2023-11-25 15:25:53.916678324 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Nothing.html 2023-11-25 15:25:53.916678324 +0100 @@ -465,7 +465,7 @@ class FE_Nothing< dim, spacedim >

    Definition of a finite element space with zero degrees of freedom and that, consequently, can only represent a single function: the zero function.

    This class is useful (in the context of an hp-method) to represent empty cells in the triangulation on which no degrees of freedom should be allocated, or to describe a field that is extended by zero to a part of the domain where we don't need it. Thus a triangulation may be divided into two regions: an active region where normal elements are used, and an inactive region where FE_Nothing elements are used. The DoFHandler will therefore assign no degrees of freedom to the FE_Nothing cells, and this subregion is therefore implicitly deleted from the computation. step-10 and step-46 show use cases for this element. An interesting application for this element is also presented in the paper [Cangiani2012].

    FE_Nothing as seen as a function space

    -

    Finite elements are often best interpreted as forming a function space, i.e., a set of functions that form a vector space. One can indeed interpret FE_Nothing in this light: It corresponds to the function space $V_h=\{0\}$, i.e., the set of functions that are zero everywhere. (The constructor can take an argument that, if greater than one, extends the space to one of vector-valued functions with more than one component, with all components equal to zero everywhere.) Indeed, this is a vector space since every linear combination of elements in the vector space is also an element in the vector space, as is every multiple of the single element zero. It is obvious that the function space has no degrees of freedom, thus the name of the class.

    +

    Finite elements are often best interpreted as forming a function space, i.e., a set of functions that form a vector space. One can indeed interpret FE_Nothing in this light: It corresponds to the function space $V_h=\{0\}$, i.e., the set of functions that are zero everywhere. (The constructor can take an argument that, if greater than one, extends the space to one of vector-valued functions with more than one component, with all components equal to zero everywhere.) Indeed, this is a vector space since every linear combination of elements in the vector space is also an element in the vector space, as is every multiple of the single element zero. It is obvious that the function space has no degrees of freedom, thus the name of the class.

    FE_Nothing in combination with other elements

    In situations such as those of step-46, one uses FE_Nothing on cells where one is not interested in a solution variable. For example, in fluid structure interaction problems, the fluid velocity is only defined on cells inside the fluid part of the domain. One then uses FE_Nothing on cells in the solid part of the domain to describe the finite element space for the velocity. In other words, the velocity lives everywhere conceptually, but it is identically zero in those parts of the domain where it is not of interest and doesn't use up any degrees of freedom there.

    The question is what happens at the interface between areas where one is interested in the solution (and uses a "normal" finite element) and where one is not interested (and uses FE_Nothing): Should the solution at that interface be zero – i.e., we consider a "continuous" finite element field that happens to be zero in that area where FE_Nothing is used – or is there no requirement for continuity at the interface. In the deal.II language, this is encoded by what the function FiniteElement::compare_for_domination() returns: If the FE_Nothing "dominates", then the solution must be zero at the interface; if it does not, then there is no requirement and one can think of FE_Nothing as a function space that is in general discontinuous (i.e., there is no requirement for any kind of continuity at cell interfaces) but on every cell equal to zero.

    @@ -633,7 +633,7 @@ - +
    [in]typeSpecifies the reference-cell type.
    [in]n_componentsDenotes the number of vector components to give this finite element. The default is one.
    [in]dominateDecides whether FE_Nothing will dominate any other FE in compare_for_domination() (with the default being false). Therefore at interfaces where, for example, a $Q_1$ meets an FE_Nothing, we will force the traces of the two functions to be the same. Because the FE_Nothing encodes a space that is zero everywhere, this means that the $Q_1$ field will be forced to become zero at this interface. See also the discussion in the general documentation of this class.
    [in]dominateDecides whether FE_Nothing will dominate any other FE in compare_for_domination() (with the default being false). Therefore at interfaces where, for example, a $Q_1$ meets an FE_Nothing, we will force the traces of the two functions to be the same. Because the FE_Nothing encodes a space that is zero everywhere, this means that the $Q_1$ field will be forced to become zero at this interface. See also the discussion in the general documentation of this class.
    @@ -2499,7 +2499,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3220,7 +3220,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3328,7 +3328,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3557,7 +3557,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -3616,9 +3616,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3663,11 +3663,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__P1NC.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__P1NC.html 2023-11-25 15:25:53.970010574 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__P1NC.html 2023-11-25 15:25:53.970010574 +0100 @@ -471,14 +471,14 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Detailed Description

    Implementation of the scalar version of the P1 nonconforming finite element, a piecewise linear element on quadrilaterals in 2d. This implementation is only for 2d cells in a 2d space (i.e., codimension 0).

    -

    Unlike the usual continuous, $H^1$ conforming finite elements, the P1 nonconforming element does not enforce continuity across edges. However, it requires the continuity in an integral sense: any function in the space should have the same integral values on two sides of the common edge shared by two adjacent elements.

    -

    Thus, each function in the nonconforming element space can be discontinuous, and consequently not included in $H^1_0$, just like the basis functions in Discontinuous Galerkin (DG) finite element spaces. On the other hand, basis functions in DG spaces are completely discontinuous across edges without any relation between the values from both sides. This is a reason why usual weak formulations for DG schemes contain additional penalty terms for jump across edges to control discontinuity. However, nonconforming elements usually do not need additional terms in their weak formulations because their integrals along edges are the same from both sides, i.e., there is some level of continuity.

    +

    Unlike the usual continuous, $H^1$ conforming finite elements, the P1 nonconforming element does not enforce continuity across edges. However, it requires the continuity in an integral sense: any function in the space should have the same integral values on two sides of the common edge shared by two adjacent elements.

    +

    Thus, each function in the nonconforming element space can be discontinuous, and consequently not included in $H^1_0$, just like the basis functions in Discontinuous Galerkin (DG) finite element spaces. On the other hand, basis functions in DG spaces are completely discontinuous across edges without any relation between the values from both sides. This is a reason why usual weak formulations for DG schemes contain additional penalty terms for jump across edges to control discontinuity. However, nonconforming elements usually do not need additional terms in their weak formulations because their integrals along edges are the same from both sides, i.e., there is some level of continuity.

    Dice Rule

    Since any function in the P1 nonconforming space is piecewise linear on each element, the function value at the midpoint of each edge is same as the mean value on the edge. Thus the continuity of the integral value across each edge is equivalent to the continuity of the midpoint value of each edge in this case.

    Thus for the P1 nonconforming element, the function values at midpoints on edges of a cell are important. The first attempt to define (local) degrees of freedom (DoFs) on a quadrilateral is by using midpoint values of a function.

    However, these 4 functionals are not linearly independent because a linear function on 2d is uniquely determined by only 3 independent values. A simple observation reads that any linear function on a quadrilateral should satisfy the 'dice rule': the sum of two function values at the midpoints of the edge pair on opposite sides of a cell is equal to the sum of those at the midpoints of the other edge pair. This is called the 'dice rule' because the number of points on opposite sides of a dice always adds up to the same number as well (in the case of dice, to seven).

    -

    In formulas, the dice rule is written as $\phi(m_0) + \phi(m_1) = \phi(m_2) +
-  \phi(m_3)$ for all $\phi$ in the function space where $m_j$ is the midpoint of the edge $e_j$. Here, we assume the standard numbering convention for edges used in deal.II and described in class GeometryInfo.

    +

    In formulas, the dice rule is written as $\phi(m_0) + \phi(m_1) = \phi(m_2) +
+  \phi(m_3)$ for all $\phi$ in the function space where $m_j$ is the midpoint of the edge $e_j$. Here, we assume the standard numbering convention for edges used in deal.II and described in class GeometryInfo.

    Conversely if 4 values at midpoints satisfying the dice rule are given, then there always exists the unique linear function which coincides with 4 midpoints values.

    Due to the dice rule, three values at any three midpoints can determine the last value at the last midpoint. It means that the number of independent local functionals on a cell is 3, and this is also the dimension of the linear polynomial space on a cell in 2d.

    Shape functions

    @@ -494,11 +494,11 @@ * | | * | | * 0---------|---------1 -*

    For each vertex $v_j$ of given cell, there are two edges of which $v_j$ is one of end points. Consider a linear function such that it has value 0.5 at the midpoints of two adjacent edges, and 0.0 at the two midpoints of the other edges. Note that the set of these values satisfies the dice rule which is described above. We denote such a function associated with vertex $v_j$ by $\phi_j$. Then the set of 4 shape functions is a partition of unity on a cell: $\sum_{j=0}^{3} \phi_j = 1$. (This is easy to see: at each edge midpoint, the sum of the four function adds up to one because two functions have value 0.5 and the other value 0.0. Because the function is globally linear, the only function that can have value 1 at four points must also be globally equal to one.)

    -

    The following figures represent $\phi_j$ for $j=0,\cdots,3$ with their midpoint values:

    +*

    For each vertex $v_j$ of given cell, there are two edges of which $v_j$ is one of end points. Consider a linear function such that it has value 0.5 at the midpoints of two adjacent edges, and 0.0 at the two midpoints of the other edges. Note that the set of these values satisfies the dice rule which is described above. We denote such a function associated with vertex $v_j$ by $\phi_j$. Then the set of 4 shape functions is a partition of unity on a cell: $\sum_{j=0}^{3} \phi_j = 1$. (This is easy to see: at each edge midpoint, the sum of the four function adds up to one because two functions have value 0.5 and the other value 0.0. Because the function is globally linear, the only function that can have value 1 at four points must also be globally equal to one.)

    +

    The following figures represent $\phi_j$ for $j=0,\cdots,3$ with their midpoint values:

    • -

      shape function $\phi_0$:

      *  +--------0.0--------+
      +

      shape function $\phi_0$:

      *  +--------0.0--------+
       *  |                   |
       *  |                   |
       *  |                   |
      @@ -512,7 +512,7 @@
       *  

    • -

      shape function $\phi_1$:

      *  +--------0.0--------+
      +

      shape function $\phi_1$:

      *  +--------0.0--------+
       *  |                   |
       *  |                   |
       *  |                   |
      @@ -526,7 +526,7 @@
       *  

    • -

      shape function $\phi_2$:

      *  +--------0.5--------+
      +

      shape function $\phi_2$:

      *  +--------0.5--------+
       *  |                   |
       *  |                   |
       *  |                   |
      @@ -540,7 +540,7 @@
       *  

    • -

      shape function $\phi_3$:

      *  +--------0.5--------+
      +

      shape function $\phi_3$:

      *  +--------0.5--------+
       *  |                   |
       *  |                   |
       *  |                   |
      @@ -560,8 +560,8 @@
       

      Degrees of freedom

      We next have to consider the global basis functions for the element because the system of equations which we ultimately have to solve is for a global system, not local. The global basis functions associated with a node are defined by a cell-wise composition of local shape functions associated with the node on each element.

      There is a theoretical result about the linear independence of the global basis functions depending on the type of the boundary condition we consider.

      -

      When homogeneous Dirichlet boundary conditions are given, the global basis functions associated with interior nodes are linearly independent. Then, the number of DoFs is equal to the number of interior nodes, and consequently the same as the number of DoFs for the standard bilinear $Q_1$ finite element.

      -

      When Neumann boundary conditions are given, the global basis functions associated with all nodes (including boundary nodes) are actually not linearly independent. There exists one redundancy. Thus in this case, the number of DoFs is equal to the number of all nodes minus 1. This is, again as for the regular $Q_1$ element.

      +

      When homogeneous Dirichlet boundary conditions are given, the global basis functions associated with interior nodes are linearly independent. Then, the number of DoFs is equal to the number of interior nodes, and consequently the same as the number of DoFs for the standard bilinear $Q_1$ finite element.

      +

      When Neumann boundary conditions are given, the global basis functions associated with all nodes (including boundary nodes) are actually not linearly independent. There exists one redundancy. Thus in this case, the number of DoFs is equal to the number of all nodes minus 1. This is, again as for the regular $Q_1$ element.

      Unit support points

      For a smooth function, we construct a piecewise linear function which belongs to the element space by using its nodal values as DoF values.

      Note that for the P1 nonconforming element, two nodal values of a smooth function and its interpolant do not coincide in general, in contrast to ordinary Lagrange finite elements. Of course, it is meaningless to refer 'nodal value' because the element space has nonconformity. But it is also true even though the single global basis function associated with a node is considered the unique 'nodal value' at the node. For instance, consider the basis function associated with a node. Consider two lines representing the level sets for value 0.5 and 0, respectively, by connecting two midpoints. Then we cut the quad into two sub-triangles by the diagonal which is placed along those two lines. It gives another level set for value 0.25 which coincides with the cutting diagonal. Therefore these three level sets are all parallel in the quad and it gives the value 0.75 at the base node, not value 1. This is true whether the quadrilateral is a rectangle, parallelogram, or any other shape.

      @@ -871,8 +871,8 @@
    -

    Return the coefficients of 4 local linear shape functions $\phi_j(x,y) = a
-x + b y + c$ on given cell. For each local shape function, the array consists of three coefficients is in order of a,b and c.

    +

    Return the coefficients of 4 local linear shape functions $\phi_j(x,y) = a
+x + b y + c$ on given cell. For each local shape function, the array consists of three coefficients is in order of a,b and c.

    Definition at line 89 of file fe_p1nc.cc.

    @@ -2544,7 +2544,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    @@ -3215,7 +3215,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3317,7 +3317,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3555,7 +3555,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -3610,9 +3610,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3655,11 +3655,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Poly.html differs (JavaScript source, ASCII text, with very long lines (851)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Poly.html 2023-11-25 15:25:54.030009353 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Poly.html 2023-11-25 15:25:54.030009353 +0100 @@ -1545,17 +1545,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -1599,21 +1599,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -2641,7 +2641,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3362,7 +3362,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3470,7 +3470,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3730,7 +3730,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -3789,9 +3789,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3836,11 +3836,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PolyFace.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PolyFace.html 2023-11-25 15:25:54.086674867 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PolyFace.html 2023-11-25 15:25:54.086674867 +0100 @@ -2517,7 +2517,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3238,7 +3238,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3346,7 +3346,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3606,7 +3606,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -3665,9 +3665,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3712,11 +3712,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PolyTensor.html differs (JavaScript source, ASCII text, with very long lines (808)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PolyTensor.html 2023-11-25 15:25:54.146673646 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PolyTensor.html 2023-11-25 15:25:54.146673646 +0100 @@ -492,12 +492,12 @@

    Similarly, in many cases, node functionals depend on the shape of the mesh cell, since they evaluate normal or tangential components on the faces. In order to allow for a set of transformations, the variable mapping_kind has been introduced. It needs be set in the constructor of a derived class.

    Any derived class must decide on the polynomial space to use. This polynomial space should be implemented simply as a set of vector valued polynomials like PolynomialsBDM and PolynomialsRaviartThomas. In order to facilitate this implementation, which basis the polynomial space chooses is not of importance to the current class – as described next, this class handles the transformation from the basis chosen by the polynomial space template argument to the basis we want to use for finite element computations internally.

    Determining the correct basis

    -

    In most cases, the basis used by the class that describes the polynomial space, $\{\tilde\varphi_j(\hat{\mathbf x})\}$, does not match the one we want to use for the finite element description, $\{\varphi_j(\hat{\mathbf x})\}$. Rather, we need to express the finite element shape functions as a linear combination of the basis provided by the polynomial space:

    -\begin{align*}
+<p>In most cases, the basis used by the class that describes the polynomial space, <picture><source srcset=$\{\tilde\varphi_j(\hat{\mathbf x})\}$, does not match the one we want to use for the finite element description, $\{\varphi_j(\hat{\mathbf x})\}$. Rather, we need to express the finite element shape functions as a linear combination of the basis provided by the polynomial space:

    +\begin{align*}
   \varphi_j = \sum_k c_{jk} \tilde\varphi_j.
-\end{align*} +\end{align*}" src="form_1149.png"/>

    -

    These expansion coefficients $c_{jk}$ are typically computed in the constructors of derived classes. To facilitate this, this class at first (unless told otherwise, see below), assumes that the shape functions should be exactly the ones provided by the polynomial space. In the constructor of the derived class, one then typically has code of the form

    // Now compute the inverse node matrix, generating the correct
    +

    These expansion coefficients $c_{jk}$ are typically computed in the constructors of derived classes. To facilitate this, this class at first (unless told otherwise, see below), assumes that the shape functions should be exactly the ones provided by the polynomial space. In the constructor of the derived class, one then typically has code of the form

    // Now compute the inverse node matrix, generating the correct
    // basis functions from the raw ones. For a discussion of what
    // exactly happens here, see FETools::compute_node_matrix.
    @@ -510,7 +510,7 @@
    void invert(const FullMatrix< number2 > &M)
    FullMatrix< double > compute_node_matrix(const FiniteElement< dim, spacedim > &fe)
    -

    The FETools::compute_node_matrix() function explains in more detail what exactly it computes, and how; in any case, the result is that inverse_node_matrix now contains the expansion coefficients $c_{jk}$, and the fact that this block of code now sets the matrix to a non-zero size indicates to the functions of the current class that it should from then on use the expanded basis, $\{\varphi_j(\hat{\mathbf x})\}$, and no longer the original, "raw" basis $\{\tilde\varphi_j(\hat{\mathbf x})\}$ when asked for values or derivatives of shape functions.

    +

    The FETools::compute_node_matrix() function explains in more detail what exactly it computes, and how; in any case, the result is that inverse_node_matrix now contains the expansion coefficients $c_{jk}$, and the fact that this block of code now sets the matrix to a non-zero size indicates to the functions of the current class that it should from then on use the expanded basis, $\{\varphi_j(\hat{\mathbf x})\}$, and no longer the original, "raw" basis $\{\tilde\varphi_j(\hat{\mathbf x})\}$ when asked for values or derivatives of shape functions.

    In order for this scheme to work, it is important to ensure that the size of the inverse_node_matrix be zero at the time when FETools::compute_node_matrix() is called; thus, the call to this function cannot be inlined into the last line – the result of the call really does need to be stored in the temporary object M.

    Setting the transformation

    In most cases, vector valued basis functions must be transformed when mapped from the reference cell to the actual grid cell. These transformations can be selected from the set MappingKind and stored in mapping_kind. Therefore, each constructor should contain a line like:

    @@ -2543,7 +2543,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3264,7 +3264,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3372,7 +3372,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3632,7 +3632,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -3691,9 +3691,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3738,11 +3738,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidDGP.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidDGP.html 2023-11-25 15:25:54.203339159 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidDGP.html 2023-11-25 15:25:54.203339159 +0100 @@ -469,7 +469,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Detailed Description

    template<int dim, int spacedim = dim>
    -class FE_PyramidDGP< dim, spacedim >

    Implementation of a scalar Lagrange finite element on a pyramid that yields the finite element space of discontinuous, piecewise polynomials of degree $k$.

    +class FE_PyramidDGP< dim, spacedim >

    Implementation of a scalar Lagrange finite element on a pyramid that yields the finite element space of discontinuous, piecewise polynomials of degree $k$.

    Note
    Currently, only linear polynomials (degree=1) are implemented. See also the documentation of ScalarLagrangePolynomialPyramid.

    Also see Simplex support.

    @@ -703,11 +703,11 @@

    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -1951,17 +1951,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2005,21 +2005,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -2985,7 +2985,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3706,7 +3706,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3814,7 +3814,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4074,7 +4074,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4133,9 +4133,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidP.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidP.html 2023-11-25 15:25:54.260004673 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidP.html 2023-11-25 15:25:54.260004673 +0100 @@ -469,7 +469,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Detailed Description

    template<int dim, int spacedim = dim>
    -class FE_PyramidP< dim, spacedim >

    Implementation of a scalar Lagrange finite element on a pyramid that yields the finite element space of continuous, piecewise polynomials of degree $k$.

    +class FE_PyramidP< dim, spacedim >

    Implementation of a scalar Lagrange finite element on a pyramid that yields the finite element space of continuous, piecewise polynomials of degree $k$.

    Note
    Currently, only linear polynomials (degree=1) are implemented. See also the documentation of ScalarLagrangePolynomialPyramid.

    Also see Simplex support.

    @@ -858,11 +858,11 @@

    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2106,17 +2106,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2160,21 +2160,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3001,7 +3001,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3722,7 +3722,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3830,7 +3830,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4090,7 +4090,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4149,9 +4149,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidPoly.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidPoly.html 2023-11-25 15:25:54.316670187 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__PyramidPoly.html 2023-11-25 15:25:54.316670187 +0100 @@ -657,11 +657,11 @@

    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    @@ -1905,17 +1905,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -1959,21 +1959,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3001,7 +3001,7 @@
    Returns
    The index of this degree of freedom within the set of degrees of freedom on the entire cell. The returned value will be between zero and dofs_per_cell.
    -
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.
    +
    Note
    This function exists in this class because that is where it was first implemented. However, it can't really work in the most general case without knowing what element we have. The reason is that when a face is flipped or rotated, we also need to know whether we need to swap the degrees of freedom on this face, or whether they are immune from this. For this, consider the situation of a $Q_3$ element in 2d. If face_flip is true, then we need to consider the two degrees of freedom on the edge in reverse order. On the other hand, if the element were a $Q_1^2$, then because the two degrees of freedom on this edge belong to different vector components, they should not be considered in reverse order. What all of this shows is that the function can't work if there are more than one degree of freedom per line or quad, and that in these cases the function will throw an exception pointing out that this functionality will need to be provided by a derived class that knows what degrees of freedom actually represent.

    Reimplemented in FE_Q_Base< dim, spacedim >, FE_Q_Base< dim, dim >, FE_Q_Base< dim, spacedim >, FESystem< dim, spacedim >, FESystem< dim, dim >, and FESystem< dim, spacedim >.

    @@ -3722,7 +3722,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3830,7 +3830,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4090,7 +4090,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4149,9 +4149,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q.html differs (JavaScript source, ASCII text, with very long lines (759)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q.html 2023-11-25 15:25:54.376668965 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q.html 2023-11-25 15:25:54.376668965 +0100 @@ -483,7 +483,7 @@

    The constructor creates a TensorProductPolynomials object that includes the tensor product of LagrangeEquidistant polynomials of degree p. This TensorProductPolynomials object provides all values and derivatives of the shape functions. In case a quadrature rule is given, the constructor creates a TensorProductPolynomials object that includes the tensor product of Lagrange polynomials with the support points from points.

    Furthermore the constructor fills the interface_constraints, the prolongation (embedding) and the restriction matrices. These are implemented only up to a certain degree and may not be available for very high polynomial degree.

    Unit support point distribution and conditioning of interpolation

    -

    When constructing an FE_Q element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points xi are those points where the jth Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

    +

    When constructing an FE_Q element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points xi are those points where the jth Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

    The Gauss-Lobatto points in 1d include the end points 0 and +1 of the unit interval. The interior points are shifted towards the end points, which gives a denser point distribution close to the element boundary.

    If combined with Gauss-Lobatto quadrature, FE_Q based on the default support points gives diagonal mass matrices. This case is demonstrated in step-48. However, this element can be combined with arbitrary quadrature rules through the usual FEValues approach, including full Gauss quadrature. In the general case, the mass matrix is non-diagonal.

    Numbering of the degrees of freedom (DoFs)

    @@ -544,9 +544,9 @@ - @@ -559,9 +559,9 @@ - +
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    $Q_1$ element, shape function 0

    +

    $Q_1$ element, shape function 0

    -

    $Q_1$ element, shape function 1

    +

    $Q_1$ element, shape function 1

    $Q_1$ element, shape function 2

    +

    $Q_1$ element, shape function 2

    -
    $Q_1$ element, shape function 3
    $Q_1$ element, shape function 3

    Q2 elements

      @@ -669,9 +669,9 @@
    -

    $Q_2$ element, shape function 0

    +

    $Q_2$ element, shape function 0

    -

    $Q_2$ element, shape function 1

    +

    $Q_2$ element, shape function 1

    @@ -684,9 +684,9 @@ -

    $Q_2$ element, shape function 2

    +

    $Q_2$ element, shape function 2

    -

    $Q_2$ element, shape function 3

    +

    $Q_2$ element, shape function 3

    @@ -699,9 +699,9 @@ -

    $Q_2$ element, shape function 4

    +

    $Q_2$ element, shape function 4

    -

    $Q_2$ element, shape function 5

    +

    $Q_2$ element, shape function 5

    @@ -714,9 +714,9 @@ -

    $Q_2$ element, shape function 6

    +

    $Q_2$ element, shape function 6

    -

    $Q_2$ element, shape function 7

    +

    $Q_2$ element, shape function 7

    @@ -726,7 +726,7 @@

    -

    $Q_2$ element, shape function 8

    +

    $Q_2$ element, shape function 8

    @@ -895,9 +895,9 @@ -

    $Q_4$ element, shape function 0

    +

    $Q_4$ element, shape function 0

    -

    $Q_4$ element, shape function 1

    +

    $Q_4$ element, shape function 1

    @@ -910,9 +910,9 @@ -

    $Q_4$ element, shape function 2

    +

    $Q_4$ element, shape function 2

    -

    $Q_4$ element, shape function 3

    +

    $Q_4$ element, shape function 3

    @@ -925,9 +925,9 @@ -

    $Q_4$ element, shape function 4

    +

    $Q_4$ element, shape function 4

    -

    $Q_4$ element, shape function 5

    +

    $Q_4$ element, shape function 5

    @@ -940,9 +940,9 @@ -

    $Q_4$ element, shape function 6

    +

    $Q_4$ element, shape function 6

    -

    $Q_4$ element, shape function 7

    +

    $Q_4$ element, shape function 7

    @@ -955,9 +955,9 @@ -

    $Q_4$ element, shape function 8

    +

    $Q_4$ element, shape function 8

    -

    $Q_4$ element, shape function 9

    +

    $Q_4$ element, shape function 9

    @@ -970,9 +970,9 @@ -

    $Q_4$ element, shape function 10

    +

    $Q_4$ element, shape function 10

    -

    $Q_4$ element, shape function 11

    +

    $Q_4$ element, shape function 11

    @@ -985,9 +985,9 @@ -

    $Q_4$ element, shape function 12

    +

    $Q_4$ element, shape function 12

    -

    $Q_4$ element, shape function 13

    +

    $Q_4$ element, shape function 13

    @@ -1000,9 +1000,9 @@ -

    $Q_4$ element, shape function 14

    +

    $Q_4$ element, shape function 14

    -

    $Q_4$ element, shape function 15

    +

    $Q_4$ element, shape function 15

    @@ -1015,9 +1015,9 @@ -

    $Q_4$ element, shape function 16

    +

    $Q_4$ element, shape function 16

    -

    $Q_4$ element, shape function 17

    +

    $Q_4$ element, shape function 17

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Base.html differs (JavaScript source, ASCII text, with very long lines (759)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Base.html 2023-11-25 15:25:54.433334479 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Base.html 2023-11-25 15:25:54.433334479 +0100 @@ -2523,17 +2523,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2577,21 +2577,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3895,7 +3895,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4003,7 +4003,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4232,7 +4232,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4291,9 +4291,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4338,11 +4338,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Bubbles.html differs (JavaScript source, ASCII text, with very long lines (759)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Bubbles.html 2023-11-25 15:25:54.489999993 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Bubbles.html 2023-11-25 15:25:54.489999993 +0100 @@ -484,17 +484,17 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Detailed Description

    template<int dim, int spacedim = dim>
    -class FE_Q_Bubbles< dim, spacedim >

    Implementation of a scalar Lagrange finite element $Q_p^+$ that yields the finite element space of continuous, piecewise polynomials of degree p in each coordinate direction plus some (non-normalized) bubble enrichment space spanned by the additional shape function $\varphi_j(\mathbf x)
+class FE_Q_Bubbles< dim, spacedim ></div><p>Implementation of a scalar Lagrange finite element <picture><source srcset=$Q_p^+$ that yields the finite element space of continuous, piecewise polynomials of degree p in each coordinate direction plus some (non-normalized) bubble enrichment space spanned by the additional shape function $\varphi_j(\mathbf x)
 = 2^{p-1}\left(x_j-\frac 12\right)^{p-1}
-\left[\prod_{i=0}^{dim-1}(x_i(1-x_i))\right]$. for $j=0,\ldots,dim-1$. If $p$ is one, then the first factor disappears and one receives the usual bubble function centered at the mid-point of the cell. Because these last shape functions have polynomial degree is $p+1$, the overall polynomial degree of the shape functions in the space described by this class is $p+1$.

    +\left[\prod_{i=0}^{dim-1}(x_i(1-x_i))\right]$" src="form_1157.png"/>. for $j=0,\ldots,dim-1$. If $p$ is one, then the first factor disappears and one receives the usual bubble function centered at the mid-point of the cell. Because these last shape functions have polynomial degree is $p+1$, the overall polynomial degree of the shape functions in the space described by this class is $p+1$.

    This class is realized using tensor product polynomials based on equidistant or given support points, in the same way as one can provide support points to the FE_Q class's constructors.

    For more information about the spacedim template parameter check the documentation of the FiniteElement class, or the one of Triangulation.

    -

    Due to the fact that the enrichments are small almost everywhere for large $p$, the condition number for the mass and stiffness matrix quickly increaseses with increasing $p$. Below you see a comparison with FE_Q(QGaussLobatto(p+1)) for dim=1.

    +

    Due to the fact that the enrichments are small almost everywhere for large $p$, the condition number for the mass and stiffness matrix quickly increaseses with increasing $p$. Below you see a comparison with FE_Q(QGaussLobatto(p+1)) for dim=1.

    -

    Therefore, this element should be used with care for $p>3$.

    +

    Therefore, this element should be used with care for $p>3$.

    Implementation

    The constructor creates a TensorProductPolynomials object that includes the tensor product of LagrangeEquidistant polynomials of degree p plus the bubble enrichments. This TensorProductPolynomialsBubbles object provides all values and derivatives of the shape functions. In case a quadrature rule is given, the constructor creates a TensorProductPolynomialsBubbles object that includes the tensor product of Lagrange polynomials with the support points from points and the bubble enrichments as defined above.

    Furthermore the constructor fills the interface_constrains, the prolongation (embedding) and the restriction matrices.

    @@ -720,11 +720,11 @@
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2759,17 +2759,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2813,21 +2813,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -4029,7 +4029,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4137,7 +4137,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4366,7 +4366,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4425,9 +4425,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__DG0.html differs (JavaScript source, ASCII text, with very long lines (759)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__DG0.html 2023-11-25 15:25:54.546665504 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__DG0.html 2023-11-25 15:25:54.546665504 +0100 @@ -891,11 +891,11 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2907,17 +2907,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

    -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1140.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

    +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1142.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2961,21 +2961,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

    -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1144.png"/>

    -

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

    +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1145.png"/>

    -

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    +

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -4177,7 +4177,7 @@
    [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4285,7 +4285,7 @@
    scalarAn object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4514,7 +4514,7 @@
    component_maskThe mask that selects individual components of the finite element

    Return a vector of generalized support points.

    -
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.
    +
    Note
    The vector returned by this function is always a minimal set of unique support points. This is in contrast to the behavior of get_unit_support_points() that returns a repeated list of unit support points for an FESystem of numerous (Lagrangian) base elements. As a consequence, it is possible to have fewer generalized support points than degrees of freedom in the element. An example is the element FESystem<dim>(FE_Q<dim>(1), 2), which has two copies of the $Q_1$ element. In 2d, each copy has 4 degrees of freedom, and each copy has its support points in the four vertices of the cell. While the get_support_points() function would return a vector of size 8 in which each of the vertices is listed twice, this function strips out the duplicates and returns a vector of length 4 in which each vertex is listed only once. This is possible because the purpose of this function is to return a list of points so that it is possible to interpolate an arbitrary function onto the finite element space, and this is possible by knowing the two components of the function in question at the four vertices of the cell – it is not necessary to ask for this information twice at each vertex.

    See the glossary entry on generalized support points for more information.

    @@ -4573,9 +4573,9 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Hierarchical.html differs (JavaScript source, ASCII text, with very long lines (752)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Hierarchical.html 2023-11-25 15:25:54.613330814 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classFE__Q__Hierarchical.html 2023-11-25 15:25:54.613330814 +0100 @@ -519,7 +519,7 @@

    Numbering of the degrees of freedom (DoFs)

    The original ordering of the shape functions represented by the TensorProductPolynomials is a tensor product numbering. However, the shape functions on a cell are renumbered beginning with the shape functions whose support points are at the vertices, then on the line, on the quads, and finally (for 3d) on the hexes. To be explicit, these numberings are listed in the following:

    Q1 elements

    -

    The $Q_1^H$ element is of polynomial degree one and, consequently, is exactly the same as the $Q_1$ element in class FE_Q. In particular, the shape function are defined in the exact same way:

    +

    The $Q_1^H$ element is of polynomial degree one and, consequently, is exactly the same as the $Q_1$ element in class FE_Q. In particular, the shape function are defined in the exact same way:

    • 1d case:

      *      0-------1
      @@ -575,9 +575,9 @@
       
          
       
    - @@ -590,9 +590,9 @@ - +
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    $Q_1^H$ element, shape function 0

    +

    $Q_1^H$ element, shape function 0

    -

    $Q_1^H$ element, shape function 1

    +

    $Q_1^H$ element, shape function 1

    $Q_1^H$ element, shape function 2

    +

    $Q_1^H$ element, shape function 2

    -
    $Q_1^H$ element, shape function 3
    $Q_1^H$ element, shape function 3

    Q2 elements

      @@ -700,9 +700,9 @@
    -

    $Q_2^H$ element, shape function 0

    +

    $Q_2^H$ element, shape function 0

    -

    $Q_2^H$ element, shape function 1

    +

    $Q_2^H$ element, shape function 1

    @@ -715,9 +715,9 @@ -

    $Q_2^H$ element, shape function 2

    +

    $Q_2^H$ element, shape function 2

    -

    $Q_2^H$ element, shape function 3

    +

    $Q_2^H$ element, shape function 3

    @@ -730,9 +730,9 @@ -

    $Q_2^H$ element, shape function 4

    +

    $Q_2^H$ element, shape function 4

    -

    $Q_2^H$ element, shape function 5

    +

    $Q_2^H$ element, shape function 5

    @@ -745,9 +745,9 @@ -

    $Q_2^H$ element, shape function 6

    +

    $Q_2^H$ element, shape function 6

    -

    $Q_2^H$ element, shape function 7

    +

    $Q_2^H$ element, shape function 7

    @@ -757,7 +757,7 @@

    -

    $Q_2^H$ element, shape function 8

    +

    $Q_2^H$ element, shape function 8

    @@ -788,9 +788,9 @@ -

    $Q_3^H$ element, shape function 0

    +

    $Q_3^H$ element, shape function 0

    -

    $Q_3^H$ element, shape function 1

    +

    $Q_3^H$ element, shape function 1

    @@ -803,9 +803,9 @@ -

    $Q_3^H$ element, shape function 2

    +

    $Q_3^H$ element, shape function 2

    -

    $Q_3^H$ element, shape function 3

    +

    $Q_3^H$ element, shape function 3

    @@ -818,9 +818,9 @@ -

    $Q_3^H$ element, shape function 4

    +

    $Q_3^H$ element, shape function 4

    -

    $Q_3^H$ element, shape function 5

    +

    $Q_3^H$ element, shape function 5

    @@ -833,9 +833,9 @@ -

    $Q_3^H$ element, shape function 6

    +

    $Q_3^H$ element, shape function 6

    -

    $Q_3^H$ element, shape function 7

    +

    $Q_3^H$ element, shape function 7

    @@ -848,9 +848,9 @@ -

    $Q_3^H$ element, shape function 8

    +

    $Q_3^H$ element, shape function 8

    -

    $Q_3^H$ element, shape function 9

    +

    $Q_3^H$ element, shape function 9

    @@ -863,9 +863,9 @@ -

    $Q_3^H$ element, shape function 10

    +

    $Q_3^H$ element, shape function 10

    -

    $Q_3^H$ element, shape function 11

    +

    $Q_3^H$ element, shape function 11

    @@ -878,9 +878,9 @@ -

    $Q_3^H$ element, shape function 12

    +

    $Q_3^H$ element, shape function 12

    -

    $Q_3^H$ element, shape function 13

    +

    $Q_3^H$ element, shape function 13

    @@ -893,9 +893,9 @@ -

    $Q_3^H$ element, shape function 14

    +

    $Q_3^H$ element, shape function 14

    -$Q_3^H$ element, shape function 15 +$Q_3^H$ element, shape function 15

    Q4 elements

    -

    where $\mathbf A$ is this matrix. $\mathbf A$ and $\mathbf B$ are assumed to be symmetric, and $\mathbf B$ has to be positive definite. Only eigenvalues in the interval $(\rm{lower\_bound},
-\rm{upper\_bound}]$ are computed with the absolute tolerance $\rm{abs\_accuracy}$. An approximate eigenvalue is accepted as converged when it is determined to lie in an interval $[a,b]$ of width less than or equal to $\rm{abs\_accuracy} + eps * \rm{max}( |a|,|b| )$, where $eps$ is the machine precision. If $\rm{abs\_accuracy}$ is less than or equal to zero, then $eps \, |\mathbf{T}|_1$ will be used in its place, where $|\mathbf{T}|_1$ is the 1-norm of the tridiagonal matrix obtained by reducing $\mathbf A$ to tridiagonal form. Eigenvalues will be computed most accurately when $\rm{abs\_accuracy}$ is set to twice the underflow threshold, not zero. After this routine has been called, all eigenvalues in $(\rm{lower\_bound}, \rm{upper\_bound}]$ will be stored in eigenvalues and the corresponding eigenvectors will be stored in eigenvectors, whose dimension is set accordingly.

    +

    where $\mathbf A$ is this matrix. $\mathbf A$ and $\mathbf B$ are assumed to be symmetric, and $\mathbf B$ has to be positive definite. Only eigenvalues in the interval $(\rm{lower\_bound},
+\rm{upper\_bound}]$ are computed with the absolute tolerance $\rm{abs\_accuracy}$. An approximate eigenvalue is accepted as converged when it is determined to lie in an interval $[a,b]$ of width less than or equal to $\rm{abs\_accuracy} + eps * \rm{max}( |a|,|b| )$, where $eps$ is the machine precision. If $\rm{abs\_accuracy}$ is less than or equal to zero, then $eps \, |\mathbf{T}|_1$ will be used in its place, where $|\mathbf{T}|_1$ is the 1-norm of the tridiagonal matrix obtained by reducing $\mathbf A$ to tridiagonal form. Eigenvalues will be computed most accurately when $\rm{abs\_accuracy}$ is set to twice the underflow threshold, not zero. After this routine has been called, all eigenvalues in $(\rm{lower\_bound}, \rm{upper\_bound}]$ will be stored in eigenvalues and the corresponding eigenvectors will be stored in eigenvectors, whose dimension is set accordingly.

    Note
    Calls the LAPACK function Xsygvx.

    Definition at line 2237 of file lapack_full_matrix.cc.

    @@ -2471,7 +2471,7 @@

    Compute the inverse of the matrix by singular value decomposition.

    Requires that state is either LAPACKSupport::matrix or LAPACKSupport::svd. In the first case, this function calls compute_svd(). After this function, the object will have the state LAPACKSupport::inverse_svd.

    For a singular value decomposition, the inverse is simply computed by replacing all singular values by their reciprocal values. If the matrix does not have maximal rank, singular values 0 are not touched, thus computing the minimal norm right inverse of the matrix.

    -

    The parameter threshold determines, when a singular value should be considered zero. It is the ratio of the smallest to the largest nonzero singular value $s_{max}$. Thus, the inverses of all singular values less than $s_{max}/\rm{threshold}$ will be set to zero.

    +

    The parameter threshold determines, when a singular value should be considered zero. It is the ratio of the smallest to the largest nonzero singular value $s_{max}$. Thus, the inverses of all singular values less than $s_{max}/\rm{threshold}$ will be set to zero.

    Definition at line 1676 of file lapack_full_matrix.cc.

    @@ -2543,9 +2543,9 @@
    -

    After a call to compute_eigenvalues(), this function returns the $n\times
-n$ matrix of (right) eigenvectors in a decomposition of the form $A V = V
-\Lambda$. Note that this function constructs the associated matrix on the fly, since LAPACK packs complex-conjugate eigenvalue/eigenvector pairs of real-valued matrices into a real-valued return matrix. This call only succeeds in case the respective flag right_eigenvectors in compute_eigenvalues() has been set to true.

    +

    After a call to compute_eigenvalues(), this function returns the $n\times
+n$ matrix of (right) eigenvectors in a decomposition of the form $A V = V
+\Lambda$. Note that this function constructs the associated matrix on the fly, since LAPACK packs complex-conjugate eigenvalue/eigenvector pairs of real-valued matrices into a real-valued return matrix. This call only succeeds in case the respective flag right_eigenvectors in compute_eigenvalues() has been set to true.

    Definition at line 2065 of file lapack_full_matrix.cc.

    @@ -4201,7 +4201,7 @@
    -

    The matrix $\mathbf U$ in the singular value decomposition $\mathbf U \cdot \mathbf S \cdot \mathbf V^T$.

    +

    The matrix $\mathbf U$ in the singular value decomposition $\mathbf U \cdot \mathbf S \cdot \mathbf V^T$.

    Definition at line 986 of file lapack_full_matrix.h.

    @@ -4228,7 +4228,7 @@
    -

    The matrix $\mathbf V^T$ in the singular value decomposition $\mathbf U \cdot \mathbf S \cdot \mathbf V^T$.

    +

    The matrix $\mathbf V^T$ in the singular value decomposition $\mathbf U \cdot \mathbf S \cdot \mathbf V^T$.

    Definition at line 992 of file lapack_full_matrix.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html differs (JavaScript source, ASCII text, with very long lines (778)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 2023-11-25 15:25:56.259963970 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 2023-11-25 15:25:56.259963970 +0100 @@ -1161,7 +1161,7 @@
    -

    Return the square of the $l_2$-norm.

    +

    Return the square of the $l_2$-norm.

    Definition at line 544 of file cuda_vector.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html differs (JavaScript source, ASCII text, with very long lines (1069)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 2023-11-25 15:25:56.286630096 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 2023-11-25 15:25:56.289963360 +0100 @@ -313,7 +313,7 @@

    Detailed Description

    template<typename Number>
    -class LinearAlgebra::ReadWriteVector< Number >

    ReadWriteVector is intended to represent vectors in ${\mathbb R}^N$ for which it stores all or a subset of elements. The latter case in important in parallel computations, where $N$ may be so large that no processor can actually all elements of a solution vector, but where this is also not necessary: one typically only has to store the values of degrees of freedom that live on cells that are locally owned plus potentially those degrees of freedom that live on ghost cells.

    +class LinearAlgebra::ReadWriteVector< Number >

    ReadWriteVector is intended to represent vectors in ${\mathbb R}^N$ for which it stores all or a subset of elements. The latter case in important in parallel computations, where $N$ may be so large that no processor can actually all elements of a solution vector, but where this is also not necessary: one typically only has to store the values of degrees of freedom that live on cells that are locally owned plus potentially those degrees of freedom that live on ghost cells.

    This class allows to access individual elements to be read or written. However, it does not allow global operations such as taking the norm. ReadWriteVector can be used to read and write elements in vectors derived from VectorSpaceVector such as TrilinosWrappers::MPI::Vector and PETScWrappers::MPI::Vector.

    Storing elements

    Most of the time, one will simply read from or write into a vector of the current class using the global numbers of these degrees of freedom. This is done using operator()() or operator[]() which call global_to_local() to transform the global index into a local one. In such cases, it is clear that one can only access elements of the vector that the current object indeed stores.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html differs (JavaScript source, ASCII text, with very long lines (1022)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 2023-11-25 15:25:56.319962749 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 2023-11-25 15:25:56.319962749 +0100 @@ -1512,7 +1512,7 @@
    -

    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    +

    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    @@ -1851,7 +1851,7 @@
    -

    Calculate the scalar product between each block of this vector and V and store the result in a full matrix matrix. This function computes the result by forming $A_{ij}=U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element!) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that inner product results in a square symmetric matrix and almost half of the scalar products can be avoided.

    +

    Calculate the scalar product between each block of this vector and V and store the result in a full matrix matrix. This function computes the result by forming $A_{ij}=U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element!) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that inner product results in a square symmetric matrix and almost half of the scalar products can be avoided.

    Obviously, this function can only be used if all blocks of both vectors are of the same size.

    Note
    Internally, a single global reduction will be called to accumulate scalar product between locally owned degrees of freedom.
    @@ -1889,7 +1889,7 @@
    -

    Calculate the scalar product between each block of this vector and V using a metric tensor matrix. This function computes the result of $ \sum_{ij} A^{ij} U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that $U_i \cdot V_j$ and $A^{ij}$ are symmetric matrices and almost half of the scalar products can be avoided.

    +

    Calculate the scalar product between each block of this vector and V using a metric tensor matrix. This function computes the result of $ \sum_{ij} A^{ij} U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that $U_i \cdot V_j$ and $A^{ij}$ are symmetric matrices and almost half of the scalar products can be avoided.

    Obviously, this function can only be used if all blocks of both vectors are of the same size.

    Note
    Internally, a single global reduction will be called to accumulate the scalar product between locally owned degrees of freedom.
    @@ -1935,7 +1935,7 @@
    -

    Set each block of this vector as follows: $V^i = s V^i + b \sum_{j} U_j A^{ji}$ where $V^i$ and $U_j$ indicate the $i$th block (not element) of $V$ and the $j$th block of $U$, respectively.

    +

    Set each block of this vector as follows: $V^i = s V^i + b \sum_{j} U_j A^{ji}$ where $V^i$ and $U_j$ indicate the $i$th block (not element) of $V$ and the $j$th block of $U$, respectively.

    Obviously, this function can only be used if all blocks of both vectors are of the same size.

    @@ -2266,7 +2266,7 @@
    -

    Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

    +

    Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

    Implements LinearAlgebra::VectorSpaceVector< Number >.

    @@ -2288,7 +2288,7 @@
    -

    Return the square of the $l_2$ norm of the vector.

    +

    Return the square of the $l_2$ norm of the vector.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html differs (JavaScript source, ASCII text, with very long lines (847)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 2023-11-25 15:25:56.356628671 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 2023-11-25 15:25:56.356628671 +0100 @@ -1109,7 +1109,7 @@

    Initialize vector with local_size locally-owned and ghost_size ghost degrees of freedoms.

    The optional argument comm_sm, which consists of processes on the same shared-memory domain, allows users have read-only access to both locally-owned and ghost values of processes combined in the shared-memory communicator. See the general documentation of this class for more information about this argument.

    -
    Note
    In the created underlying partitioner, the local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively. Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation as and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.
    +
    Note
    In the created underlying partitioner, the local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively. Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation as and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.
    @@ -1281,7 +1281,7 @@

    Initiates communication for the compress() function with non- blocking communication. This function does not wait for the transfer to finish, in order to allow for other computations during the time it takes until all data arrives.

    Before the data is actually exchanged, the function must be followed by a call to compress_finish().

    -

    In case this function is called for more than one vector before compress_finish() is invoked, it is mandatory to specify a unique communication channel to each such call, in order to avoid several messages with the same ID that will corrupt this operation. Any communication channel less than 100 is a valid value (in particular, the range $[100, 200)$ is reserved for LinearAlgebra::distributed::BlockVector).

    +

    In case this function is called for more than one vector before compress_finish() is invoked, it is mandatory to specify a unique communication channel to each such call, in order to avoid several messages with the same ID that will corrupt this operation. Any communication channel less than 100 is a valid value (in particular, the range $[100, 200)$ is reserved for LinearAlgebra::distributed::BlockVector).

    @@ -1327,7 +1327,7 @@

    Initiates communication for the update_ghost_values() function with non-blocking communication. This function does not wait for the transfer to finish, in order to allow for other computations during the time it takes until all data arrives.

    Before the data is actually exchanged, the function must be followed by a call to update_ghost_values_finish().

    -

    In case this function is called for more than one vector before update_ghost_values_finish() is invoked, it is mandatory to specify a unique communication channel to each such call, in order to avoid several messages with the same ID that will corrupt this operation. Any communication channel less than 100 is a valid value (in particular, the range $[100, 200)$ is reserved for LinearAlgebra::distributed::BlockVector).

    +

    In case this function is called for more than one vector before update_ghost_values_finish() is invoked, it is mandatory to specify a unique communication channel to each such call, in order to avoid several messages with the same ID that will corrupt this operation. Any communication channel less than 100 is a valid value (in particular, the range $[100, 200)$ is reserved for LinearAlgebra::distributed::BlockVector).

    @@ -2122,7 +2122,7 @@
    -

    Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

    +

    Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

    Implements LinearAlgebra::VectorSpaceVector< Number >.

    @@ -2144,7 +2144,7 @@
    -

    Return the square of the $l_2$ norm of the vector.

    +

    Return the square of the $l_2$ norm of the vector.

    @@ -2957,7 +2957,7 @@
    -

    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    +

    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearOperator.html differs (JavaScript source, ASCII text, with very long lines (1252)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearOperator.html 2023-11-25 15:25:56.373294999 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classLinearOperator.html 2023-11-25 15:25:56.373294999 +0100 @@ -223,7 +223,7 @@
    LinearOperator::reinit_range_vector
    std::function< void(Range &v, bool omit_zeroing_entries)> reinit_range_vector
    Definition: linear_operator.h:302

    that store the knowledge how to initialize (resize + internal data structures) an arbitrary vector of the Range and Domain space.

    The primary purpose of this class is to provide syntactic sugar for complex matrix-vector operations and free the user from having to create, set up and handle intermediate storage locations by hand.

    -

    As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possible different) matrices. In order to construct a LinearOperator op that stores the knowledge of this operation, one can write:

    +

    As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possible different) matrices. In order to construct a LinearOperator op that stores the knowledge of this operation, one can write:

    #href_anchor"code" href="linear__operator__tools_8h.html">deal.II/lac/linear_operator_tools.h>
    ::SparseMatrix<double> A, B, C;
    @@ -239,7 +239,7 @@
    LinearOperator< Range, Domain, Payload > linear_operator(const Matrix &matrix)
    -
    Note
    This class makes heavy use of std::function objects and lambda functions. This flexibility comes with a run-time penalty. Only use this object to encapsulate matrix object of medium to large size (as a rule of thumb, sparse matrices with a size $1000\times1000$, or larger).
    +
    Note
    This class makes heavy use of std::function objects and lambda functions. This flexibility comes with a run-time penalty. Only use this object to encapsulate matrix object of medium to large size (as a rule of thumb, sparse matrices with a size $1000\times1000$, or larger).
    In order to use Trilinos or PETSc sparse matrices and preconditioners in conjunction with the LinearOperator class, it is necessary to extend the functionality of the LinearOperator class by means of an additional Payload.

    For example: LinearOperator instances representing matrix inverses usually require calling some linear solver. These solvers may not have interfaces to the LinearOperator (which, for example, may represent a composite operation). The TrilinosWrappers::internal::LinearOperatorImplementation::TrilinosPayload therefore provides an interface extension to the LinearOperator so that it can be passed to the solver and used by the solver as if it were a Trilinos operator. This implies that all of the necessary functionality of the specific Trilinos operator has been overloaded within the Payload class. This includes operator-vector multiplication and inverse operator-vector multiplication, where the operator can be either a TrilinosWrappers::SparseMatrix or a TrilinosWrappers::PreconditionBase and the vector is a native Trilinos vector.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classManifold.html differs (JavaScript source, ASCII text, with very long lines (2267)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classManifold.html 2023-11-25 15:25:56.396627856 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classManifold.html 2023-11-25 15:25:56.396627856 +0100 @@ -202,11 +202,11 @@

    In the most essential use of manifolds, manifold descriptions are used to create a "point between other points". For example, when a triangulation creates a new vertex on a cell, face, or edge, it determines the new vertex' coordinates through the following function call:

    ...
    Point<spacedim> new_vertex = manifold.get_new_point (points,weights);
    ...
    -

    Here, points is a collection of points in spacedim dimension, and a collection of corresponding weights. The points in this context will then be the vertices of the cell, face, or edge, and the weights are typically one over the number of points when a new midpoint of the cell, face, or edge is needed. Derived classes then will implement the Manifold::get_new_point() function in a way that computes the location of this new point. In the simplest case, for example in the FlatManifold class, the function simply computes the arithmetic average (with given weights) of the given points. However, other classes do something differently; for example, the SphericalManifold class, which is used to describe domains that form (part of) the sphere, will ensure that, given the two vertices of an edge at the boundary, the new returned point will lie on the grand circle that connects the two points, rather than choosing a point that is half-way between the two points in ${\mathbb R}^d$.

    +

    Here, points is a collection of points in spacedim dimension, and a collection of corresponding weights. The points in this context will then be the vertices of the cell, face, or edge, and the weights are typically one over the number of points when a new midpoint of the cell, face, or edge is needed. Derived classes then will implement the Manifold::get_new_point() function in a way that computes the location of this new point. In the simplest case, for example in the FlatManifold class, the function simply computes the arithmetic average (with given weights) of the given points. However, other classes do something differently; for example, the SphericalManifold class, which is used to describe domains that form (part of) the sphere, will ensure that, given the two vertices of an edge at the boundary, the new returned point will lie on the grand circle that connects the two points, rather than choosing a point that is half-way between the two points in ${\mathbb R}^d$.

    Note
    Unlike almost all other cases in the library, we here interpret the points to be in real space, not on the reference cell.

    Manifold::get_new_point() has a default implementation that can simplify this process somewhat: Internally, the function calls the Manifold::get_intermediate_point() to compute pair-wise intermediate points. Internally the Manifold::get_intermediate_point() calls the Manifold::project_to_manifold() function after computing the convex combination of the given points. This allows derived classes to only overload Manifold::project_to_manifold() for simple situations. This is often useful when describing manifolds that are embedded in higher dimensional space, e.g., the surface of a sphere. In those cases, the desired new point may be computed simply by the (weighted) average of the provided points, projected back out onto the sphere.

    Common use case: Computing tangent vectors

    -

    The second use of this class is in computing directions on domains and boundaries. For example, we may need to compute the normal vector to a face in order to impose the no-flow boundary condition $\mathbf u \cdot \mathbf n = 0$ (see the VectorTools::compute_no_normal_flux_constraints() as an example). Similarly, we may need normal vectors in the computation of the normal component of the gradient of the numerical solution in order to compute the jump in the gradient of the solution in error estimators (see, for example, the KellyErrorEstimator class).

    +

    The second use of this class is in computing directions on domains and boundaries. For example, we may need to compute the normal vector to a face in order to impose the no-flow boundary condition $\mathbf u \cdot \mathbf n = 0$ (see the VectorTools::compute_no_normal_flux_constraints() as an example). Similarly, we may need normal vectors in the computation of the normal component of the gradient of the numerical solution in order to compute the jump in the gradient of the solution in error estimators (see, for example, the KellyErrorEstimator class).

    To make this possible, the Manifold class provides a member function (to be implemented by derived classes) that computes a "vector tangent to the manifold at one point, in direction of another point" via the Manifold::get_tangent_vector() function. For example, in 2d, one would use this function with the two vertices of an edge at the boundary to compute a "tangential" vector along the edge, and then get the normal vector by rotation by 90 degrees. In 3d, one would compute the two vectors "tangential" to the two edges of a boundary face adjacent to a boundary vertex, and then take the cross product of these two to obtain a vector normal to the boundary.

    For reasons that are more difficult to understand, these direction vectors are normalized in a very specific way, rather than to have unit norm. See the documentation of Manifold::get_tangent_vector(), as well as below, for more information.

    @@ -214,11 +214,11 @@

    A unified description

    The "real" way to understand what this class does is to see it in the framework of differential geometry. More specifically, differential geometry is fundamentally based on the assumption that two sufficiently close points are connected via a line of "shortest distance". This line is called a "geodesic", and it is selected from all other lines that connect the two points by the property that it is shortest if distances are measured in terms of the "metric" that describes a manifold. To give examples, recall that the geodesics of a flat manifold (implemented in the FlatManifold class) are simply the straight lines connecting two points, whereas for spherical manifolds (see the SphericalManifold class) geodesics between two points of same distance are the grand circles, and are in general curved lines when connecting two lines of different distance from the origin.

    In the following discussion, and for the purposes of implementing the current class, the concept of "metrics" that is so fundamental to differential geometry is no longer of great importance to us. Rather, everything can simply be described by postulating the existence of geodesics connecting points on a manifold.

    -

    Given geodesics, the operations discussed in the previous two sections can be described in a more formal way. In essence, they rely on the fact that we can assume that a geodesic is parameterized by a "time" like variable $t$ so that $\mathbf s(t)$ describes the curve and so that $\mathbf s(0)$ is the location of the first and $\mathbf s(1)$ the location of the second point. Furthermore, $\mathbf s(t)$ traces out the geodesic at constant speed, covering equal distance in equal time (as measured by the metric). Note that this parameterization uses time, not arc length to denote progress along the geodesic.

    -

    In this picture, computing a mid-point between points $\mathbf x_1$ and $\mathbf x_2$, with weights $w_1$ and $w_2=1-w_1$, simply requires computing the point $\mathbf s(w_1)$. Computing a new point as a weighted average of more than two points can be done by considering pairwise geodesics, finding suitable points on the geodetic between the first two points, then on the geodetic between this new point and the third given point, etc.

    -

    Likewise, the "tangential" vector described above is simply the velocity vector, $\mathbf s'(t)$, evaluated at one of the end points of a geodesic (i.e., at $t=0$ or $t=1$). In the case of a flat manifold, the geodesic is simply the straight line connecting two points, and the velocity vector is just the connecting vector in that case. On the other hand, for two points on a spherical manifold, the geodesic is a grand circle, and the velocity vector is tangent to the spherical surface.

    -

    Note that if we wanted to, we could use this to compute the length of the geodesic that connects two points $\mathbf x_1$ and $\mathbf x_2$ by computing $\int_0^1 \|\mathbf s'(t)\| dt$ along the geodesic that connects them, but this operation will not be of use to us in practice. One could also conceive computing the direction vector using the "new point" operation above, using the formula $\mathbf s'(0)=\lim_{w\rightarrow 0}
-\frac{\mathbf s(w)-\mathbf s(0)}{w}$ where all we need to do is compute the new point $\mathbf s(w)$ with weights $w$ and $1-w$ along the geodesic connecting $\mathbf x_1$ and $\mathbf x_2$. The default implementation of the function does this, by evaluating the quotient for a small but finite weight $w$. In practice, however, it is almost always possible to explicitly compute the direction vector, i.e., without the need to numerically approximate the limit process, and derived classes should do so.

    +

    Given geodesics, the operations discussed in the previous two sections can be described in a more formal way. In essence, they rely on the fact that we can assume that a geodesic is parameterized by a "time" like variable $t$ so that $\mathbf s(t)$ describes the curve and so that $\mathbf s(0)$ is the location of the first and $\mathbf s(1)$ the location of the second point. Furthermore, $\mathbf s(t)$ traces out the geodesic at constant speed, covering equal distance in equal time (as measured by the metric). Note that this parameterization uses time, not arc length to denote progress along the geodesic.

    +

    In this picture, computing a mid-point between points $\mathbf x_1$ and $\mathbf x_2$, with weights $w_1$ and $w_2=1-w_1$, simply requires computing the point $\mathbf s(w_1)$. Computing a new point as a weighted average of more than two points can be done by considering pairwise geodesics, finding suitable points on the geodetic between the first two points, then on the geodetic between this new point and the third given point, etc.

    +

    Likewise, the "tangential" vector described above is simply the velocity vector, $\mathbf s'(t)$, evaluated at one of the end points of a geodesic (i.e., at $t=0$ or $t=1$). In the case of a flat manifold, the geodesic is simply the straight line connecting two points, and the velocity vector is just the connecting vector in that case. On the other hand, for two points on a spherical manifold, the geodesic is a grand circle, and the velocity vector is tangent to the spherical surface.

    +

    Note that if we wanted to, we could use this to compute the length of the geodesic that connects two points $\mathbf x_1$ and $\mathbf x_2$ by computing $\int_0^1 \|\mathbf s'(t)\| dt$ along the geodesic that connects them, but this operation will not be of use to us in practice. One could also conceive computing the direction vector using the "new point" operation above, using the formula $\mathbf s'(0)=\lim_{w\rightarrow 0}
+\frac{\mathbf s(w)-\mathbf s(0)}{w}$ where all we need to do is compute the new point $\mathbf s(w)$ with weights $w$ and $1-w$ along the geodesic connecting $\mathbf x_1$ and $\mathbf x_2$. The default implementation of the function does this, by evaluating the quotient for a small but finite weight $w$. In practice, however, it is almost always possible to explicitly compute the direction vector, i.e., without the need to numerically approximate the limit process, and derived classes should do so.

    Definition at line 286 of file manifold.h.

    Member Typedef Documentation

    @@ -689,11 +689,11 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf
-x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), then the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

    -

    While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

    -

    The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf
-s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

    +

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf
+x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), then the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

    +

    While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

    +

    The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf
+s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMapping.html differs (JavaScript source, ASCII text, with very long lines (1165)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMapping.html 2023-11-25 15:25:56.423293982 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classMapping.html 2023-11-25 15:25:56.423293982 +0100 @@ -228,84 +228,84 @@ class Mapping< dim, spacedim >

    Abstract base class for mapping classes.

    This class declares the interface for the functionality to describe mappings from the reference (unit) cell to a cell in real space, as well as for filling the information necessary to use the FEValues, FEFaceValues, and FESubfaceValues classes. Concrete implementations of these interfaces are provided in derived classes.

    Mathematics of the mapping

    -

    The mapping is a transformation $\mathbf x = \mathbf F_K(\hat{\mathbf  x})$ which maps points $\hat{\mathbf x}$ in the reference cell $[0,1]^\text{dim}$ to points $\mathbf x$ in the actual grid cell $K\subset{\mathbb R}^\text{spacedim}$. Many of the applications of such mappings require the Jacobian of this mapping, $J(\hat{\mathbf x}) =
-\hat\nabla {\mathbf F}_K(\hat{\mathbf  x})$. For instance, if dim=spacedim=2, we have

    -\[
+<p>The mapping is a transformation <picture><source srcset=$\mathbf x = \mathbf F_K(\hat{\mathbf  x})$ which maps points $\hat{\mathbf x}$ in the reference cell $[0,1]^\text{dim}$ to points $\mathbf x$ in the actual grid cell $K\subset{\mathbb R}^\text{spacedim}$. Many of the applications of such mappings require the Jacobian of this mapping, $J(\hat{\mathbf x}) =
+\hat\nabla {\mathbf F}_K(\hat{\mathbf  x})$. For instance, if dim=spacedim=2, we have

    +\[
 J(\hat{\mathbf  x}) = \left(\begin{matrix}
 \frac{\partial x}{\partial \hat x} & \frac{\partial x}{\partial \hat y}
 \\
 \frac{\partial y}{\partial \hat x} & \frac{\partial y}{\partial \hat y}
 \end{matrix}\right)
-\] +\]" src="form_1265.png"/>

    Mapping of scalar functions

    The shape functions of scalar finite elements are typically defined on a reference cell and are then simply mapped according to the rule

    -\[
+<picture><source srcset=\[
 \varphi(\mathbf x) = \varphi\bigl(\mathbf F_K(\hat{\mathbf  x})\bigr)
 = \hat \varphi(\hat{\mathbf  x}).
-\] +\]" src="form_1266.png"/>

    Mapping of integrals

    -

    Using simply a change of variables, integrals of scalar functions over a cell $K$ can be expressed as an integral over the reference cell $\hat K$. Specifically, The volume form $d\hat x$ is transformed so that

    -\[
+<p>Using simply a change of variables, integrals of scalar functions over a cell <picture><source srcset=$K$ can be expressed as an integral over the reference cell $\hat K$. Specifically, The volume form $d\hat x$ is transformed so that

    +\[
  \int_K u(\mathbf x)\,dx = \int_{\hat K} \hat
 u(\hat{\mathbf  x}) \left|\text{det}J(\hat{\mathbf  x})\right|
 \,d\hat x.
-\] +\]" src="form_1268.png"/>

    In expressions where such integrals are approximated by quadrature, this then leads to terms of the form

    -\[
+<picture><source srcset=\[
  \int_K u(\mathbf x)\,dx
  \approx
  \sum_{q}
  \hat u(\hat{\mathbf  x}_q)
  \underbrace{\left|\text{det}J(\hat{\mathbf  x}_q)\right| w_q}_{=:
 \text{JxW}_q}.
-\] +\]" src="form_1269.png"/>

    -

    Here, the weights $\text{JxW}_q$ of each quadrature point (where JxW mnemonically stands for Jacobian times Quadrature Weights) take the role of the $dx$ in the original integral. Consequently, they appear in all code that computes integrals approximated by quadrature, and are accessed by FEValues::JxW().

    +

    Here, the weights $\text{JxW}_q$ of each quadrature point (where JxW mnemonically stands for Jacobian times Quadrature Weights) take the role of the $dx$ in the original integral. Consequently, they appear in all code that computes integrals approximated by quadrature, and are accessed by FEValues::JxW().

    Todo:
    Document what happens in the codimension-1 case.

    Mapping of vector fields, differential forms and gradients of vector fields

    The transformation of vector fields or differential forms (gradients of scalar functions) $\mathbf v$, and gradients of vector fields $\mathbf T$ follows the general form

    -\[
+<picture><source srcset=\[
 \mathbf v(\mathbf x) = \mathbf A(\hat{\mathbf  x})
 \hat{\mathbf  v}(\hat{\mathbf  x}),
 \qquad
 \mathbf T(\mathbf x) = \mathbf A(\hat{\mathbf  x})
 \hat{\mathbf  T}(\hat{\mathbf  x}) \mathbf B(\hat{\mathbf  x}).
-\] +\]" src="form_1272.png"/>

    The differential forms A and B are determined by the kind of object being transformed. These transformations are performed through the transform() functions, and the type of object being transformed is specified by their MappingKind argument. See the documentation there for possible choices.

    Derivatives of the mapping

    -

    Some applications require the derivatives of the mapping, of which the first order derivative is the mapping Jacobian, $J_{iJ}(\hat{\mathbf
-x})=\frac{\partial x_i}{\partial \hat x_J}$, described above. Higher order derivatives of the mapping are similarly defined, for example the Jacobian derivative, $\hat H_{iJK}(\hat{\mathbf  x}) = \frac{\partial^2
-x_i}{\partial \hat x_J \partial \hat x_K}$, and the Jacobian second derivative, $\hat K_{iJKL}(\hat{\mathbf  x}) = \frac{\partial^3
-x_i}{\partial \hat x_J \partial \hat x_K \partial \hat x_L}$. It is also useful to define the "pushed-forward" versions of the higher order derivatives: the Jacobian pushed-forward derivative, $H_{ijk}(\hat{\mathbf
+<p>Some applications require the derivatives of the mapping, of which the first order derivative is the mapping Jacobian, <picture><source srcset=$J_{iJ}(\hat{\mathbf
+x})=\frac{\partial x_i}{\partial \hat x_J}$, described above. Higher order derivatives of the mapping are similarly defined, for example the Jacobian derivative, $\hat H_{iJK}(\hat{\mathbf  x}) = \frac{\partial^2
+x_i}{\partial \hat x_J \partial \hat x_K}$, and the Jacobian second derivative, $\hat K_{iJKL}(\hat{\mathbf  x}) = \frac{\partial^3
+x_i}{\partial \hat x_J \partial \hat x_K \partial \hat x_L}$. It is also useful to define the "pushed-forward" versions of the higher order derivatives: the Jacobian pushed-forward derivative, $H_{ijk}(\hat{\mathbf
 x}) = \frac{\partial^2 x_i}{\partial \hat x_J \partial \hat
-x_K}(J_{jJ})^{-1}(J_{kK})^{-1}$, and the Jacobian pushed-forward second derivative, $K_{ijkl}(\hat{\mathbf  x}) = \frac{\partial^3 x_i}{\partial
+x_K}(J_{jJ})^{-1}(J_{kK})^{-1}$, and the Jacobian pushed-forward second derivative, $K_{ijkl}(\hat{\mathbf  x}) = \frac{\partial^3 x_i}{\partial
 \hat x_J \partial \hat x_K \partial \hat
-x_L}(J_{jJ})^{-1}(J_{kK})^{-1}(J_{lL})^{-1}$. These pushed-forward versions can be used to compute the higher order derivatives of functions defined on the reference cell with respect to the real cell coordinates. For instance, the Jacobian derivative with respect to the real cell coordinates is given by:

    +x_L}(J_{jJ})^{-1}(J_{kK})^{-1}(J_{lL})^{-1}$" src="form_1277.png"/>. These pushed-forward versions can be used to compute the higher order derivatives of functions defined on the reference cell with respect to the real cell coordinates. For instance, the Jacobian derivative with respect to the real cell coordinates is given by:

    -\[
+<picture><source srcset=\[
 \frac{\partial}{\partial x_j}\left[J_{iJ}(\hat{\mathbf  x})\right] =
 H_{ikn}(\hat{\mathbf  x})J_{nJ}(\hat{\mathbf  x}),
-\] +\]" src="form_1278.png"/>

    and the derivative of the Jacobian inverse with respect to the real cell coordinates is similarly given by:

    -\[
+<picture><source srcset=\[
 \frac{\partial}{\partial x_j}\left[\left(J_{iJ}(\hat{\mathbf
 x})\right)^{-1}\right] = -H_{nik}(\hat{\mathbf  x})\left(J_{nJ}(\hat{\mathbf
 x})\right)^{-1}.
-\] +\]" src="form_1279.png"/>

    In a similar fashion, higher order derivatives, with respect to the real cell coordinates, of functions defined on the reference cell can be defined using the Jacobian pushed-forward higher-order derivatives. For example, the derivative, with respect to the real cell coordinates, of the Jacobian pushed-forward derivative is given by:

    -\[
+<picture><source srcset=\[
 \frac{\partial}{\partial x_l}\left[H_{ijk}(\hat{\mathbf  x})\right] =
 K_{ijkl}(\hat{\mathbf  x}) -H_{mjl}(\hat{\mathbf  x})H_{imk}(\hat{\mathbf
 x})-H_{mkl}(\hat{\mathbf  x})H_{imj}(\hat{\mathbf  x}).
-\] +\]" src="form_1280.png"/>

    References

    A general publication on differential geometry and finite elements is the survey

      @@ -1089,10 +1089,10 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

    Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

    -

    Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

    +

    Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

    The information computed by this function is used to fill the various member variables of the output argument of this function. Which of the member variables of that structure should be filled is determined by the update flags stored in the Mapping::InternalDataBase object passed to this function.

    An extensive discussion of the interaction between this function and FEValues can be found in the How Mapping, FiniteElement, and FEValues work together documentation module.

    @@ -1397,37 +1397,37 @@

    The mapping kinds currently implemented by derived classes are:

    @@ -1489,21 +1489,21 @@
    -

    Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

    -

    Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

    Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

    @@ -1138,21 +1138,21 @@

    The mapping kinds currently implemented by derived classes are:

    Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

    In the case when dim=spacedim the previous formula reduces to

    -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

    Parameters
    @@ -1214,40 +1214,40 @@
    -

    Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

    +

    Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

    The mapping kinds currently implemented by derived classes are:

    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2354,7 +2354,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2408,7 +2408,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html differs (JavaScript source, ASCII text, with very long lines (1631)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 2023-11-25 15:25:56.906617479 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 2023-11-25 15:25:56.906617479 +0100 @@ -157,11 +157,11 @@

    Detailed Description

    template<int dim>
    -class NonMatching::FEInterfaceValues< dim >

    This class is intended to facilitate assembling interface terms on faces in immersed (in the sense of cut) finite element methods. These types of terms occur mainly in cut discontinuous Galerkin methods. This class works analogously to NonMatching::FEValues. The domain is assumed to be described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$, and this class assumes that we want to integrate over two different regions of each face, $F$:

    -\[
+class NonMatching::FEInterfaceValues< dim ></div><p>This class is intended to facilitate assembling interface terms on faces in immersed (in the sense of cut) finite element methods. These types of terms occur mainly in cut discontinuous Galerkin methods. This class works analogously to <a class=NonMatching::FEValues. The domain is assumed to be described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$, and this class assumes that we want to integrate over two different regions of each face, $F$:

    +\[
 N = \{x \in F : \psi(x) < 0 \}, \\
 P = \{x \in F : \psi(x) > 0 \},
-\] +\]" src="form_2029.png"/>

    which we as before refer to as the "inside" and "outside" regions of the face.

    @@ -195,7 +195,7 @@
    }
    void reinit(const CellIteratorType &cell, const unsigned int face_no, const unsigned int sub_face_no, const CellNeighborIteratorType &cell_neighbor, const unsigned int face_no_neighbor, const unsigned int sub_face_no_neighbor)
    -

    To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. The immersed quadrature rules are only generated if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEInterfaceValues object created with a quadrature over the reference cell: $[0, 1]^{dim-1}$.

    +

    To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. The immersed quadrature rules are only generated if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEInterfaceValues object created with a quadrature over the reference cell: $[0, 1]^{dim-1}$.

    Definition at line 437 of file fe_values.h.

    Member Typedef Documentation

    @@ -360,7 +360,7 @@ - + @@ -476,7 +476,7 @@
    mapping_collectionCollection of Mappings to be used.
    fe_collectionCollection of FiniteElements to be used.
    q_collectionCollection of Quadrature rules over $[0, 1]^{dim-1}$ that should be used when a face is not intersected and we do not need to generate immersed quadrature rules.
    q_collectionCollection of Quadrature rules over $[0, 1]^{dim-1}$ that should be used when a face is not intersected and we do not need to generate immersed quadrature rules.
    q_collection_1dCollection of 1-dimensional quadrature rules used to generate the immersed quadrature rules. See the QuadratureGenerator class.
    mesh_classifierObject used to determine when the immersed quadrature rules need to be generated.
    region_update_flagsStruct storing UpdateFlags for the inside/outside region of the cell.
    -

    Return an FEInterfaceValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    +

    Return an FEInterfaceValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the outside domain, the returned optional will not contain a value.

    Definition at line 461 of file fe_values.cc.

    @@ -496,7 +496,7 @@
    -

    Return an FEInterfaceValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    +

    Return an FEInterfaceValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the inside domain, the returned optional will not contain a value.

    Definition at line 473 of file fe_values.cc.

    @@ -527,7 +527,7 @@
    -

    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim-1}$. These will be used on the non-intersected cells.

    +

    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim-1}$. These will be used on the non-intersected cells.

    Definition at line 332 of file fe_values.cc.

    @@ -790,7 +790,7 @@
    -

    For each element in the FECollection passed to the constructor, this object contains an FEInterfaceValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim-1}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

    +

    For each element in the FECollection passed to the constructor, this object contains an FEInterfaceValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim-1}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is INSIDE (and we do not need to generate an immersed quadrature), we return the FEInterfaceValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEInterfaceValues class that does not have a copy-constructor.

    @@ -819,7 +819,7 @@
    -

    For each element in the FECollection passed to the constructor, this object contains an FEInterfaceValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim-1}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

    +

    For each element in the FECollection passed to the constructor, this object contains an FEInterfaceValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim-1}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is OUTSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEInterfaceValues class that does not have a copy-constructor.

    @@ -848,7 +848,7 @@
    -

    FEInterfaceValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEInterfaceValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 668 of file fe_values.h.

    @@ -875,7 +875,7 @@
    -

    FEInterfaceValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEInterfaceValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 677 of file fe_values.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FEValues.html differs (JavaScript source, ASCII text, with very long lines (2374)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FEValues.html 2023-11-25 15:25:56.923283807 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FEValues.html 2023-11-25 15:25:56.923283807 +0100 @@ -157,17 +157,17 @@

    Detailed Description

    template<int dim>
    -class NonMatching::FEValues< dim >

    This class is intended to facilitate assembling in immersed (in the sense of cut) finite element methods when the domain is described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$. In this type of method, we typically need to integrate over 3 different regions of each cell, $K$:

    -\[
+class NonMatching::FEValues< dim ></div><p>This class is intended to facilitate assembling in immersed (in the sense of cut) finite element methods when the domain is described by a level set function, <picture><source srcset=$\psi : \mathbb{R}^{dim} \to \mathbb{R}$. In this type of method, we typically need to integrate over 3 different regions of each cell, $K$:

    +\[
 N = \{x \in K : \psi(x) < 0 \}, \\
 P = \{x \in K : \psi(x) > 0 \}, \\
 S = \{x \in K : \psi(x) = 0 \}.
-\] +\]" src="form_2026.png"/>

    Thus we need quadrature rules for these 3 regions:

    -

    As in the QuadratureGenerator class, we refer to $N$, $P$, and $S$ as the inside, outside, and surface regions. The constructor of this class takes a discrete level set function described by a DoFHandler and a Vector. When the reinit() function is called, the QuadratureGenerator will be called in the background to create these immersed quadrature rules. This class then creates FEValues objects for the inside/outside regions and an FEImmersedSurfaceValues object for the surface region. These objects can then be accessed through one of the functions: get_inside_fe_values(), get_outside_fe_values(), or get_surface_fe_values(). Since a cut between a cell and the domain can be arbitrarily small, the underlying algorithm may generate a quadrature rule with 0 points. This can, for example, happen if the relative size of the cut is similar to the floating-point accuracy. Since the FEValues-like objects are not allowed to contain 0 points, the object that get_inside/outside/surface_fe_values() returns is wrapped in a std_cxx17::optional. This requires us to check if the returned FEValues-like object contains a value before we use it:

    +

    As in the QuadratureGenerator class, we refer to $N$, $P$, and $S$ as the inside, outside, and surface regions. The constructor of this class takes a discrete level set function described by a DoFHandler and a Vector. When the reinit() function is called, the QuadratureGenerator will be called in the background to create these immersed quadrature rules. This class then creates FEValues objects for the inside/outside regions and an FEImmersedSurfaceValues object for the surface region. These objects can then be accessed through one of the functions: get_inside_fe_values(), get_outside_fe_values(), or get_surface_fe_values(). Since a cut between a cell and the domain can be arbitrarily small, the underlying algorithm may generate a quadrature rule with 0 points. This can, for example, happen if the relative size of the cut is similar to the floating-point accuracy. Since the FEValues-like objects are not allowed to contain 0 points, the object that get_inside/outside/surface_fe_values() returns is wrapped in a std_cxx17::optional. This requires us to check if the returned FEValues-like object contains a value before we use it:

    for (const auto &cell : dof_handler.active_cell_iterators())
    {
    @@ -188,7 +188,7 @@
    }
    std_cxx17::optional<::FEValues< dim > > fe_values_inside
    Definition: fe_values.h:345
    -

    Of course, it is somewhat expensive to generate the immersed quadrature rules and create FEValues objects with the generated quadratures. To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. It only generates the immersed quadrature rules if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEValues object created with a quadrature over the reference cell: $[0, 1]^{dim}$.

    +

    Of course, it is somewhat expensive to generate the immersed quadrature rules and create FEValues objects with the generated quadratures. To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. It only generates the immersed quadrature rules if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEValues object created with a quadrature over the reference cell: $[0, 1]^{dim}$.

    Definition at line 144 of file fe_values.h.

    Member Typedef Documentation

    @@ -353,7 +353,7 @@ - + @@ -407,7 +407,7 @@
    mapping_collectionCollection of Mappings to be used.
    fe_collectionCollection of FiniteElements to be used.
    q_collectionCollection of Quadrature rules over $[0, 1]^{dim}$ that should be used when a cell is not intersected and we do not need to generate immersed quadrature rules.
    q_collectionCollection of Quadrature rules over $[0, 1]^{dim}$ that should be used when a cell is not intersected and we do not need to generate immersed quadrature rules.
    q_collection_1dCollection of 1-dimensional quadrature rules used to generate the immersed quadrature rules. See the QuadratureGenerator class.
    mesh_classifierObject used to determine when the immersed quadrature rules need to be generated.
    region_update_flagsStruct storing UpdateFlags for the inside/outside/surface region of the cell.
    -

    Return an FEValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    +

    Return an FEValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the outside domain, the returned optional will not contain a value.

    Definition at line 241 of file fe_values.cc.

    @@ -427,7 +427,7 @@
    -

    Return an FEValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    +

    Return an FEValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the inside domain, the returned optional will not contain a value.

    Definition at line 253 of file fe_values.cc.

    @@ -447,7 +447,7 @@
    -

    Return an FEValues object reinitialized with a quadrature for the surface region of the cell: $\{x \in K : \psi(x) = 0 \}$.

    +

    Return an FEValues object reinitialized with a quadrature for the surface region of the cell: $\{x \in K : \psi(x) = 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is not intersected, the returned optional will not contain a value.

    Definition at line 265 of file fe_values.cc.

    @@ -478,7 +478,7 @@
    -

    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim}$. These will be used on the non-intersected cells.

    +

    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim}$. These will be used on the non-intersected cells.

    Definition at line 105 of file fe_values.cc.

    @@ -695,7 +695,7 @@
    -

    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

    +

    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is INSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEValues class that does not have a copy-constructor.

    @@ -724,7 +724,7 @@
    -

    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

    +

    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is OUTSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEValues class that does not have a copy-constructor.

    @@ -753,7 +753,7 @@
    -

    FEValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 345 of file fe_values.h.

    @@ -780,7 +780,7 @@
    -

    FEValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 354 of file fe_values.h.

    @@ -807,7 +807,7 @@
    -

    FEImmersedSurfaceValues object created with a quadrature rule integrating over the surface region, $\{x \in B : \psi(x) = 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEImmersedSurfaceValues object created with a quadrature rule integrating over the surface region, $\{x \in B : \psi(x) = 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 364 of file fe_values.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html differs (JavaScript source, ASCII text, with very long lines (1581)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 2023-11-25 15:25:56.936616868 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 2023-11-25 15:25:56.936616868 +0100 @@ -132,16 +132,16 @@

    Detailed Description

    template<int dim>
    -class NonMatching::FaceQuadratureGenerator< dim >

    This class creates immersed quadrature rules over a face, $F$, of a BoundingBox, when the domain is described by a level set function, $\psi$.

    -

    In the same way as in the QuadratureGenerator class, this class generates quadrature rules to integrate over 3 different regions of the face, $F \subset \mathbb{R}^{dim}$:

    -\[
+class NonMatching::FaceQuadratureGenerator< dim ></div><p>This class creates immersed quadrature rules over a face, <picture><source srcset=$F$, of a BoundingBox, when the domain is described by a level set function, $\psi$.

    +

    In the same way as in the QuadratureGenerator class, this class generates quadrature rules to integrate over 3 different regions of the face, $F \subset \mathbb{R}^{dim}$:

    +\[
 N = \{x \in F : \psi(x) < 0 \}, \\
 P = \{x \in F : \psi(x) > 0 \}, \\
 S = \{x \in F : \psi(x) = 0 \},
-\] +\]" src="form_2069.png"/>

    -

    which are again referred to as the "inside", $N$, "outside", $P$, and "surface" region, $S$. These type of quadrature rules are in general needed in immersed discontinuous Galerkin methods.

    -

    Under the hood, this class uses the QuadratureGenerator class to build these rules. This is done by restricting the dim-dimensional level set function to the face, thus creating a (dim-1)-dimensional level set function, $\phi$. It then creates the (dim-1)-dimensional quadratures by calling QuadratureGenerator with $\phi$. This means that what holds for the QuadratureGenerator class in general also holds for this class. In particular, if the 1d-quadrature that is used as base contains $n$ points, the number of points will be proportional to $n^{dim-1}$ in the in the inside/outside quadratures and to $n^{dim-2}$ in the surface quadrature.

    +

    which are again referred to as the "inside", $N$, "outside", $P$, and "surface" region, $S$. These type of quadrature rules are in general needed in immersed discontinuous Galerkin methods.

    +

    Under the hood, this class uses the QuadratureGenerator class to build these rules. This is done by restricting the dim-dimensional level set function to the face, thus creating a (dim-1)-dimensional level set function, $\phi$. It then creates the (dim-1)-dimensional quadratures by calling QuadratureGenerator with $\phi$. This means that what holds for the QuadratureGenerator class in general also holds for this class. In particular, if the 1d-quadrature that is used as base contains $n$ points, the number of points will be proportional to $n^{dim-1}$ in the in the inside/outside quadratures and to $n^{dim-2}$ in the surface quadrature.

    Definition at line 292 of file quadrature_generator.h.

    Member Typedef Documentation

    @@ -249,7 +249,7 @@
    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 1774 of file quadrature_generator.cc.

    @@ -268,7 +268,7 @@
    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 1782 of file quadrature_generator.cc.

    @@ -287,8 +287,8 @@
    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    -
    Note
    The normal at the quadrature points will be parallel to $\nabla \psi$.
    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +
    Note
    The normal at the quadrature points will be parallel to $\nabla \psi$.

    Definition at line 1791 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html differs (JavaScript source, ASCII text, with very long lines (715)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 2023-11-25 15:25:56.946616665 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 2023-11-25 15:25:56.946616665 +0100 @@ -232,7 +232,7 @@

    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 1853 of file quadrature_generator.cc.

    @@ -254,7 +254,7 @@

    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 1860 of file quadrature_generator.cc.

    @@ -274,7 +274,7 @@
    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ where, $F$ is the face of the BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ where, $F$ is the face of the BoundingBox passed to generate().

    Note
    In 1d, this quadrature always contains 0 points.

    Definition at line 1868 of file quadrature_generator.cc.

    @@ -321,7 +321,7 @@
    -

    Quadrature for the region $\{x \in F : \psi(x) < 0 \}$. Created in the last call to generate().

    +

    Quadrature for the region $\{x \in F : \psi(x) < 0 \}$. Created in the last call to generate().

    Definition at line 455 of file quadrature_generator.h.

    @@ -346,7 +346,7 @@
    -

    Quadrature for the region $\{x \in F : \psi(x) > 0 \}$. Created in the last call to generate().

    +

    Quadrature for the region $\{x \in F : \psi(x) > 0 \}$. Created in the last call to generate().

    Definition at line 462 of file quadrature_generator.h.

    @@ -371,7 +371,7 @@
    -

    Quadrature for the region $\{x \in F : \psi(x) = 0 \}$. This quadrature always contains zero points in 1d.

    +

    Quadrature for the region $\{x \in F : \psi(x) = 0 \}$. This quadrature always contains zero points in 1d.

    Definition at line 469 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html differs (JavaScript source, ASCII text, with very long lines (1507)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 2023-11-25 15:25:56.966616258 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 2023-11-25 15:25:56.966616258 +0100 @@ -206,41 +206,41 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    -class NonMatching::ImmersedSurfaceQuadrature< dim, spacedim >

    This class defines a quadrature formula to integrate over the intersection between an oriented surface, $\hat{S}$, and a cell or face. The word "immersed" in the class name reflects that the surface may intersect the cell/face in an arbitrary way.

    -

    The spacedim template parameter of this class is the dimension that the (spacedim-1)-dimensional surface is embedded in: $\hat{S} \subset \mathbb{R}^{\text{spacedim}}$. The dim parameter describes the dimension of the "object" that the surface intersects. That is, dim = spacedim corresponds to the surface intersecting a cell and dim = spacedim - 1 corresponds to the surface intersecting a face. The quadrature formula is described by a set of quadrature points, $\hat{x}_q \in \mathbb{R}^{\text{dim}}$, weights, $w_q$, and normalized surface normals, $\hat{n}_q \in \mathbb{R}^{\text{spacedim}}$.

    -

    Consider first the case dim = spacedim. We typically want to compute integrals in real space. A surface, $S$, intersecting a cell, $K$, in real space can be mapped onto a surface, $\hat{S}$, intersecting the unit cell, $\hat{K}$. Thus an integral over $S\cap K$ in real space can be transformed to an integral over $\hat{S} \cap \hat{K}$ according to

    -\[
+class NonMatching::ImmersedSurfaceQuadrature< dim, spacedim ></div><p>This class defines a quadrature formula to integrate over the intersection between an oriented surface, <picture><source srcset=$\hat{S}$, and a cell or face. The word "immersed" in the class name reflects that the surface may intersect the cell/face in an arbitrary way.

    +

    The spacedim template parameter of this class is the dimension that the (spacedim-1)-dimensional surface is embedded in: $\hat{S} \subset \mathbb{R}^{\text{spacedim}}$. The dim parameter describes the dimension of the "object" that the surface intersects. That is, dim = spacedim corresponds to the surface intersecting a cell and dim = spacedim - 1 corresponds to the surface intersecting a face. The quadrature formula is described by a set of quadrature points, $\hat{x}_q \in \mathbb{R}^{\text{dim}}$, weights, $w_q$, and normalized surface normals, $\hat{n}_q \in \mathbb{R}^{\text{spacedim}}$.

    +

    Consider first the case dim = spacedim. We typically want to compute integrals in real space. A surface, $S$, intersecting a cell, $K$, in real space can be mapped onto a surface, $\hat{S}$, intersecting the unit cell, $\hat{K}$. Thus an integral over $S\cap K$ in real space can be transformed to an integral over $\hat{S} \cap \hat{K}$ according to

    +\[
 \int_{S\cap K} f dS =
 \int_{S\cap K} f |d\bar{S}| =
 \int_{\hat{S}\cap\hat{K}} f \circ F_{K} \det(J) |\left( J^{-1} \right
 )^T d\hat{S}|,
-\] +\]" src="form_2043.png"/>

    -

    where $F_K$ is the mapping from reference to real space and $J$ is its Jacobian matrix. This transformation is possible since the continuous surface elements are vectors: $d\bar{S}, d\hat{S} \in \mathbb{R}^{spacedim}$, which are parallel to the normals of $S$ and $\hat{S}$. That is, the normal is needed to do the transformation. Thus, in addition to storing points and weights, this quadrature stores also the normalized normal for each quadrature point. This can be viewed as storing a discrete surface element,

    -\[
+<p> where <picture><source srcset=$F_K$ is the mapping from reference to real space and $J$ is its Jacobian matrix. This transformation is possible since the continuous surface elements are vectors: $d\bar{S}, d\hat{S} \in \mathbb{R}^{spacedim}$, which are parallel to the normals of $S$ and $\hat{S}$. That is, the normal is needed to do the transformation. Thus, in addition to storing points and weights, this quadrature stores also the normalized normal for each quadrature point. This can be viewed as storing a discrete surface element,

    +\[
 \Delta \hat{S}_q \dealcoloneq w_q \hat{n}_q \approx d\hat{S}(\hat{x}_q),
-\] +\]" src="form_2046.png"/>

    for each quadrature point. The surface integral in real space would then be approximated as

    -\[
+<picture><source srcset=\[
 \int_{S\cap K} f dS \approx
 \sum_{q} f \left(F_{K}(\hat{x}_{q}) \right) \det(J_q)
 |\left( J_q^{-1} \right)^T \hat{n}_q| w_q.
-\] +\]" src="form_2047.png"/>

    -

    When dim = spacedim - 1, this class represents a (spacedim-2)-dimensional integral. That is, if spacedim = 3 we have a line integral immersed in a face. Let $\hat{r}(t)$, $t \in [0,T]$ be an arc-length parameterizations of $\hat{F}\cap \hat{S}$, i.e., the part of the surface that intersects the face in reference space. This means that $\bar{r}(t) = F_K(\hat{r}(t))$ is a parameterization of $S\cap F$. The transformation of the line integral now reads

    -\[
+<p>When dim = spacedim - 1, this class represents a (spacedim-2)-dimensional integral. That is, if spacedim = 3 we have a line integral immersed in a face. Let <picture><source srcset=$\hat{r}(t)$, $t \in [0,T]$ be an arc-length parameterizations of $\hat{F}\cap \hat{S}$, i.e., the part of the surface that intersects the face in reference space. This means that $\bar{r}(t) = F_K(\hat{r}(t))$ is a parameterization of $S\cap F$. The transformation of the line integral now reads

    +\[
 \int_{S\cap F} f dr
 = \int_{0}^T f(\bar{r}(t)) \left \|\frac{d\bar{r}}{dt} \right \| dt
 = \int_{0}^T f(F_K(\hat{r}(t))) \left \| J \frac{d\hat{r}}{dt} \right \| dt
 \approx \sum_{q} f \left(F_{K}(\hat{x}_{q}) \right) \|J(\hat{x}_q)
 \hat{t}_q \| w_q,
-\] +\]" src="form_2053.png"/>

    -

    where $\hat{t}_q = \frac{d\hat{r}}{dt}(x_q) $ is the tangent to the curve at $\hat{x}_q$. This tangent can also be computed as $t_q = \hat{n}_q \times \hat{n}_F / \| \hat{n}_q \times \hat{n}_F \|$ where $\hat{n}_F$ is the face normal. It would be possible to compute the tangent by only knowing the normal to the curve in the face plane (i.e. the dim-dimensional normal). However, when these quadratures are used, the weak form typically involves the so-called conormal, which can not be computed without knowing the surface normal in $\mathbb{R}^{\text{spacedim}}$. The conormal is the unit vector parallel to the projection of the face normal into the surface plane. This is the same as the normalized boundary form.

    +

    where $\hat{t}_q = \frac{d\hat{r}}{dt}(x_q) $ is the tangent to the curve at $\hat{x}_q$. This tangent can also be computed as $t_q = \hat{n}_q \times \hat{n}_F / \| \hat{n}_q \times \hat{n}_F \|$ where $\hat{n}_F$ is the face normal. It would be possible to compute the tangent by only knowing the normal to the curve in the face plane (i.e. the dim-dimensional normal). However, when these quadratures are used, the weak form typically involves the so-called conormal, which can not be computed without knowing the surface normal in $\mathbb{R}^{\text{spacedim}}$. The conormal is the unit vector parallel to the projection of the face normal into the surface plane. This is the same as the normalized boundary form.

    Definition at line 107 of file immersed_surface_quadrature.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html differs (JavaScript source, ASCII text, with very long lines (1947)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 2023-11-25 15:25:56.979949318 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 2023-11-25 15:25:56.979949318 +0100 @@ -130,24 +130,24 @@

    Detailed Description

    template<int dim>
    -class NonMatching::QuadratureGenerator< dim >

    This class creates immersed quadrature rules over a BoundingBox, $B \subset \mathbb{R}^{dim}$, when the domain is described by a level set function, $\psi$.

    +class NonMatching::QuadratureGenerator< dim >

    This class creates immersed quadrature rules over a BoundingBox, $B \subset \mathbb{R}^{dim}$, when the domain is described by a level set function, $\psi$.

    This class creates quadrature rules for the intersections between the box and the three different regions defined by the level set function. That is, it creates quadrature rules to integrate over the following regions

    -\[
+<picture><source srcset=\[
 N = \{x \in B : \psi(x) < 0 \}, \\
 P = \{x \in B : \psi(x) > 0 \}, \\
 S = \{x \in B : \psi(x) = 0 \}.
-\] +\]" src="form_2063.png"/>

    -

    When working with level set functions, the most common is to describe a domain, $\Omega$, as

    -\[
+<p>When working with level set functions, the most common is to describe a domain, <picture><source srcset=$\Omega$, as

    +\[
 \Omega = \{ x \in \mathbb{R}^{dim} : \psi(x) < 0 \}.
-\] +\]" src="form_2064.png"/>

    -

    Given this, we shall use the name convention that $N$ is the "inside" region (i.e. inside $\Omega$), $P$ is the "outside" region and $S$ is the "surface" region. The "inside" and "outside" quadratures will also be referred to as the "bulk"-quadratures.

    -

    The underlying algorithm use a 1-dimensional quadrature rule as base for creating the immersed quadrature rules. Gauss-Legendre quadrature (QGauss) is recommended. The constructor takes an hp::QCollection<1>. One can select which 1d-quadrature in the collection should be used through the set_1d_quadrature() function. The number of quadrature points in the constructed quadratures will vary depending on the level set function. More quadrature points will be created if the intersection is "bad", for example, if the zero-contour has a high curvature compared to the size of the box. However, if the number of points in the 1d quadrature is $n$ the number of points will be proportional to $n^{dim}$ in the bulk quadratures and to $n^{dim-1}$ in the surface quadrature. For example, in the 2d-example in the above figure, there are 2 points in the 1d-quadrature. If the 1d-quadrature is a Gauss-Legendre quadrature and the grid has size $h$, the immersed quadratures typically give global errors proportional to $h^{2n}$, both for the bulk and surface integrals. If the 1d-quadrature has positive weights, the weights of the immersed quadratures will also be positive.

    +

    Given this, we shall use the name convention that $N$ is the "inside" region (i.e. inside $\Omega$), $P$ is the "outside" region and $S$ is the "surface" region. The "inside" and "outside" quadratures will also be referred to as the "bulk"-quadratures.

    +

    The underlying algorithm use a 1-dimensional quadrature rule as base for creating the immersed quadrature rules. Gauss-Legendre quadrature (QGauss) is recommended. The constructor takes an hp::QCollection<1>. One can select which 1d-quadrature in the collection should be used through the set_1d_quadrature() function. The number of quadrature points in the constructed quadratures will vary depending on the level set function. More quadrature points will be created if the intersection is "bad", for example, if the zero-contour has a high curvature compared to the size of the box. However, if the number of points in the 1d quadrature is $n$ the number of points will be proportional to $n^{dim}$ in the bulk quadratures and to $n^{dim-1}$ in the surface quadrature. For example, in the 2d-example in the above figure, there are 2 points in the 1d-quadrature. If the 1d-quadrature is a Gauss-Legendre quadrature and the grid has size $h$, the immersed quadratures typically give global errors proportional to $h^{2n}$, both for the bulk and surface integrals. If the 1d-quadrature has positive weights, the weights of the immersed quadratures will also be positive.

    A detailed description of the underlying algorithm can be found in "High-Order %Quadrature Methods for Implicitly Defined Surfaces and Volumes in Hyperrectangles, R. I. Saye, SIAM J. Sci. Comput., 37(2), <a href="http://www.dx.doi.org/10.1137/140966290"> @@ -254,7 +254,7 @@

    -

    Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

    Definition at line 1668 of file quadrature_generator.cc.

    @@ -273,7 +273,7 @@
    -

    Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

    Definition at line 1677 of file quadrature_generator.cc.

    @@ -292,8 +292,8 @@
    -

    Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

    -
    Note
    The normal at the quadrature points will be parallel to $\nabla \psi$.
    +

    Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

    +
    Note
    The normal at the quadrature points will be parallel to $\nabla \psi$.

    Definition at line 1686 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html differs (JavaScript source, ASCII text, with very long lines (870)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 2023-11-25 15:25:56.999948914 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 2023-11-25 15:25:56.999948914 +0100 @@ -228,10 +228,10 @@
    template<int dim, class VectorType = Vector<double>>
    class NonMatching::internal::DiscreteQuadratureGeneratorImplementation::RefSpaceFEFieldFunction< dim, VectorType >

    This class evaluates a function defined by a solution vector and a DoFHandler transformed to reference space. To be precise, if we let $\hat{x}$ be a point on the reference cell, this class implements the function

    $\hat{f}(\hat{x}) = \sum_{j=0}^{n-1} f_j \hat{\phi}_j(\hat{x})$,

    -

    where $f_j$ are the local solution values and $\hat{\phi}_j(\hat(x))$ are the local reference space shape functions. The gradient and Hessian of this function are thus derivatives with respect to the reference space coordinates, $\hat{x}_0, \hat{x}_1, \ldots$.

    +

    where $f_j$ are the local solution values and $\hat{\phi}_j(\hat(x))$ are the local reference space shape functions. The gradient and Hessian of this function are thus derivatives with respect to the reference space coordinates, $\hat{x}_0, \hat{x}_1, \ldots$.

    Note that this class is similar to FEFieldFunction, but that FEFieldFunction implements the following function on a given cell, $K$,

    $f(x) = \sum_{j=0}^{n-1} f_j \hat{\phi}_j(F_K^{-1}(x))$,

    -

    which has the same coefficients but uses real space basis functions. Here, $F_K$ is the mapping from the reference cell to the real cell.

    +

    which has the same coefficients but uses real space basis functions. Here, $F_K$ is the mapping from the reference cell to the real cell.

    Before calling the value/gradient/hessian function, the set_active_cell function must be called to specify which cell the function should be evaluated on.

    Definition at line 1312 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html differs (JavaScript source, ASCII text, with very long lines (1698)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 2023-11-25 15:25:57.016615239 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 2023-11-25 15:25:57.016615239 +0100 @@ -149,20 +149,20 @@

    Detailed Description

    template<int dim, int spacedim>
    class NonMatching::internal::QuadratureGeneratorImplementation::QGenerator< dim, spacedim >

    This class implements the Saye-algorithm cited in the documentation of the QuadratureGenerator class.

    -

    The generate function takes a number of $dim$-dimensional level set functions, $\psi_i$, and a BoundingBox<dim>, and builds a partitioning of quadratures, as defined in documentation of the QPartitioning class. That is, this class builds an object of type QPartitioning<dim>.

    -

    If all $\psi_i$ passed to generate can be determined to be positive or negative definite, the QPartitioning will consist of a single quadrature forming a tensor product.

    -

    If this is not the case, the algorithm uses recursion over the spatial dimension. The spacedim template parameter denotes the dimension we started with and dim denotes on what level we are in the recursion. That is, we first construct a QPartitioning<dim - 1> and then build the higher dimensional quadratures from these. What we in the end actually want is a spacedim-dimensional partitioning of quadratures, for a single level set function, $\psi$.

    -

    The algorithm is based on the implicit function theorem. Starting with a single level set function, $\psi$, we try to find a direction $i$, such that

    -

    $|\frac{\partial \psi}{\partial x_i}| > 0$.

    +

    The generate function takes a number of $dim$-dimensional level set functions, $\psi_i$, and a BoundingBox<dim>, and builds a partitioning of quadratures, as defined in documentation of the QPartitioning class. That is, this class builds an object of type QPartitioning<dim>.

    +

    If all $\psi_i$ passed to generate can be determined to be positive or negative definite, the QPartitioning will consist of a single quadrature forming a tensor product.

    +

    If this is not the case, the algorithm uses recursion over the spatial dimension. The spacedim template parameter denotes the dimension we started with and dim denotes on what level we are in the recursion. That is, we first construct a QPartitioning<dim - 1> and then build the higher dimensional quadratures from these. What we in the end actually want is a spacedim-dimensional partitioning of quadratures, for a single level set function, $\psi$.

    +

    The algorithm is based on the implicit function theorem. Starting with a single level set function, $\psi$, we try to find a direction $i$, such that

    +

    $|\frac{\partial \psi}{\partial x_i}| > 0$.

    throughout the whole box. This means that the zero-contour of the level set function can be parameterized by an implicit function

    -

    $H = H(x_0, ..., x_{i-1}, x_{i+1}, ..., x_{dim-1})$,

    +

    $H = H(x_0, ..., x_{i-1}, x_{i+1}, ..., x_{dim-1})$,

    so that

    -

    $\psi(..., x_{i-1}, H(..., x_{i-1}, x_{i+1}, ...), x_{i+1}, ...) = 0$,

    -

    over a subset, $I \subset C \subset \mathbb{R}^{dim-1}$, of the cross section, $C$, of the box (see BoundingBox::cross_section). Here, $I$ is the "indefinite"-region defined in the QPartitioning class. To follow convention in the original paper, we will -refer to $H$ as the "height-function" and to $i$ as the "height-function direction".

    -

    If a height function direction can be found, we go down in dimension by creating two new level set functions, $\{\psi_0, \psi_1\}$, which are the restriction of $\psi$ to the top and bottom faces of the box (in the height function direction). We then delegate to QGenerator<dim-1, spacedim> to create a QPartitioning<dim-1> over the cross section.

    +

    $\psi(..., x_{i-1}, H(..., x_{i-1}, x_{i+1}, ...), x_{i+1}, ...) = 0$,

    +

    over a subset, $I \subset C \subset \mathbb{R}^{dim-1}$, of the cross section, $C$, of the box (see BoundingBox::cross_section). Here, $I$ is the "indefinite"-region defined in the QPartitioning class. To follow convention in the original paper, we will -refer to $H$ as the "height-function" and to $i$ as the "height-function direction".

    +

    If a height function direction can be found, we go down in dimension by creating two new level set functions, $\{\psi_0, \psi_1\}$, which are the restriction of $\psi$ to the top and bottom faces of the box (in the height function direction). We then delegate to QGenerator<dim-1, spacedim> to create a QPartitioning<dim-1> over the cross section.

    When we reach the base case, $dim = 1$, the creation of QPartitioning<1> is simple. See the documentation in specialized class: QGenerator<1, spacedim>.

    As we go up through the dimensions and create the higher dimensional quadratures, we need to know the function value of the height functions at the lower dimensional quadrature points. Since the functions are implicit, we need to do root-finding on the level set functions to find the function values. For this we use the class UpThroughDimensionCreator, see documentation there.

    -

    When we have $n$ level set functions (i.e. after having gone down in dimension), we try to find a height function direction, which works for all those $\psi_i$ which are intersected by the zero contour (i.e. those not positive or negative definite). If such a direction exist, we will have a maximum of $n$ associated implicit height functions, $H_j$. Each $H_j$ parametrize the $x_i$-coordinate of the zero-contour over a region, $I_j$. The indefinite region in the lower dimensional partitioning is the union of these $I = \cup_j I_j$.

    +

    When we have $n$ level set functions (i.e. after having gone down in dimension), we try to find a height function direction, which works for all those $\psi_i$ which are intersected by the zero contour (i.e. those not positive or negative definite). If such a direction exist, we will have a maximum of $n$ associated implicit height functions, $H_j$. Each $H_j$ parametrize the $x_i$-coordinate of the zero-contour over a region, $I_j$. The indefinite region in the lower dimensional partitioning is the union of these $I = \cup_j I_j$.

    As we try to find a height function direction, we estimate bounds on the gradient components by approximating each component as a 1st-order Taylor-polynomial. If a direction can not be found, the box is split and we recurse on each smaller box. This makes an implicit function more likely to exist since we seek it over a smaller portion of the zero contour. It also makes the estimated bounds tighter since we extrapolate the Taylor-polynomial a shorter distance.

    Since we can not split a box forever, there is an maximum number of allowed splits on the additional data struct passed to the constructor. If this is reached, the algorithm uses the midpoint method as a last resort.

    @@ -346,7 +346,7 @@
    -

    Gets the $(dim - 1)$-dimensional quadratures from the lower dimensional algorithm and creates the $dim$-dimensional quadrature rules over the box from the lower dimensional ones.

    +

    Gets the $(dim - 1)$-dimensional quadratures from the lower dimensional algorithm and creates the $dim$-dimensional quadrature rules over the box from the lower dimensional ones.

    Definition at line 1118 of file quadrature_generator.cc.

    @@ -544,7 +544,7 @@
    -

    Object responsible for creating the $dim$-dimensional quadratures from

    +

    Object responsible for creating the $dim$-dimensional quadratures from

    Definition at line 1182 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html differs (JavaScript source, ASCII text, with very long lines (1964)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 2023-11-25 15:25:57.029948303 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 2023-11-25 15:25:57.029948303 +0100 @@ -150,8 +150,8 @@

    Detailed Description

    template<int spacedim>
    class NonMatching::internal::QuadratureGeneratorImplementation::QGenerator< 1, spacedim >

    The 1d-base case of the recursive algorithm QGenerator<dim, spacedim>.

    -

    Let $L$ and $R$ be the left and right bounds of the one-dimensional BoundingBox. This interval is partitioned into $[x_0, x_1, ..., x_n]$ where $x_0 = L$, $x_n = R$, and the remaining $x_i$ are the roots of the level set functions in the interval $[L, R]$. In each interval, $[x_i, x_{i+1}]$, quadrature points are distributed according to a 1d-quadrature rule. These points are added to one of the regions of QPartitioning determined from the signs of the level set functions on the interval (see documentation of QPartitioning).

    -

    If spacedim = 1 the points $[x_1, x_{n-1}]$ are also added as surface quadrature points to QPartitioning::surface.

    +

    Let $L$ and $R$ be the left and right bounds of the one-dimensional BoundingBox. This interval is partitioned into $[x_0, x_1, ..., x_n]$ where $x_0 = L$, $x_n = R$, and the remaining $x_i$ are the roots of the level set functions in the interval $[L, R]$. In each interval, $[x_i, x_{i+1}]$, quadrature points are distributed according to a 1d-quadrature rule. These points are added to one of the regions of QPartitioning determined from the signs of the level set functions on the interval (see documentation of QPartitioning).

    +

    If spacedim = 1 the points $[x_1, x_{n-1}]$ are also added as surface quadrature points to QPartitioning::surface.

    Definition at line 1208 of file quadrature_generator.h.

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html differs (JavaScript source, ASCII text, with very long lines (1168)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 2023-11-25 15:25:57.043281364 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 2023-11-25 15:25:57.043281364 +0100 @@ -118,18 +118,18 @@

    Detailed Description

    template<int dim>
    class NonMatching::internal::QuadratureGeneratorImplementation::QPartitioning< dim >

    Class that stores quadrature rules to integrate over 4 different regions of a single BoundingBox, $B$. Given multiple level set functions,

    -

    $\psi_i : \mathbb{R}^{dim} \rightarrow \mathbb{R}$, $i = 0, 1, ...$,

    -

    the box, $B \subset \mathbb{R}^{dim}$, is partitioned into a "negative", "positive", and "indefinite" region, $B = N \cup P \cup I$, according to the signs of $\psi_i$ over each region:

    +

    $\psi_i : \mathbb{R}^{dim} \rightarrow \mathbb{R}$, $i = 0, 1, ...$,

    +

    the box, $B \subset \mathbb{R}^{dim}$, is partitioned into a "negative", "positive", and "indefinite" region, $B = N \cup P \cup I$, according to the signs of $\psi_i$ over each region:

    -\[
+<picture><source srcset=\[
 N = \{x \in B : \psi_i(x) < 0, \forall i \}, \\
 P = \{x \in B : \psi_i(x) > 0, \forall i \}, \\
 I = B \setminus (\overline{N} \cup \overline{P}).
-\] +\]" src="form_2088.png"/>

    -

    Thus, all $\psi_i$ are positive over $P$ and negative over $N$. Over $I$ the level set functions differ in sign. This class holds quadrature rules for each of these regions. In addition, when there is a single level set function, $\psi$, it holds a surface quadrature for the zero contour of $\psi$:

    -

    $S = \{x \in B : \psi(x) = 0 \}$.

    -

    Note that when there is a single level set function, $I$ is empty and $N$ and $P$ are the regions that one typically integrates over in an immersed finite element method.

    +

    Thus, all $\psi_i$ are positive over $P$ and negative over $N$. Over $I$ the level set functions differ in sign. This class holds quadrature rules for each of these regions. In addition, when there is a single level set function, $\psi$, it holds a surface quadrature for the zero contour of $\psi$:

    +

    $S = \{x \in B : \psi(x) = 0 \}$.

    +

    Note that when there is a single level set function, $I$ is empty and $N$ and $P$ are the regions that one typically integrates over in an immersed finite element method.

    Definition at line 753 of file quadrature_generator.h.

    Member Function Documentation

    @@ -170,7 +170,7 @@
    -

    Quadrature for the region $\{x \in B : \psi_i(x) < 0 \forall i \}$ of the box, $B$.

    +

    Quadrature for the region $\{x \in B : \psi_i(x) < 0 \forall i \}$ of the box, $B$.

    Definition at line 767 of file quadrature_generator.h.

    @@ -189,7 +189,7 @@
    -

    Quadrature for the region $\{x \in B : \psi_i(x) > 0 \forall i \}$ of the box, $B$.

    +

    Quadrature for the region $\{x \in B : \psi_i(x) > 0 \forall i \}$ of the box, $B$.

    Definition at line 773 of file quadrature_generator.h.

    @@ -227,7 +227,7 @@
    -

    Quadrature for the region $\{x \in B : \psi(x) = 0 \}$ of the box, $B$.

    +

    Quadrature for the region $\{x \in B : \psi(x) = 0 \}$ of the box, $B$.

    Definition at line 785 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html differs (JavaScript source, ASCII text, with very long lines (1344)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 2023-11-25 15:25:57.053281161 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 2023-11-25 15:25:57.053281161 +0100 @@ -120,7 +120,7 @@  

    Detailed Description

    -

    A class that attempts to find multiple distinct roots of a function, $f(x)$, over an interval, $[l, r]$. This is done as follows. If there is a sign change in function value between the interval end points, we solve for the root. If there is no sign change, we attempt to bound the function value away from zero on $[a, b]$, to conclude that no roots exist. If we can't exclude that there are roots, we split the interval in two: $[l, (r + l) / 2]$, $[(r + l) / 2, r]$, and use the same algorithm recursively on each interval. This means that we can typically find 2 distinct roots, but not 3.

    +

    A class that attempts to find multiple distinct roots of a function, $f(x)$, over an interval, $[l, r]$. This is done as follows. If there is a sign change in function value between the interval end points, we solve for the root. If there is no sign change, we attempt to bound the function value away from zero on $[a, b]$, to conclude that no roots exist. If we can't exclude that there are roots, we split the interval in two: $[l, (r + l) / 2]$, $[(r + l) / 2, r]$, and use the same algorithm recursively on each interval. This means that we can typically find 2 distinct roots, but not 3.

    The bounds on the functions values are estimated using the function taylor_estimate_function_bounds, which approximates the function as a second order Taylor-polynomial around the interval midpoint. When we have a sign change on an interval, this class uses boost::math::tools::toms748_solve for finding roots .

    Definition at line 608 of file quadrature_generator.h.

    @@ -175,7 +175,7 @@
    -

    For each of the incoming functions, attempt to find the roots over the interval defined by interval and add these to roots. The returned roots will be sorted in ascending order: $x_0 < x_1 <...$ and duplicate roots (with respect to the tolerance in AdditionalData) will be removed.

    +

    For each of the incoming functions, attempt to find the roots over the interval defined by interval and add these to roots. The returned roots will be sorted in ascending order: $x_0 < x_1 <...$ and duplicate roots (with respect to the tolerance in AdditionalData) will be removed.

    Definition at line 532 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html differs (JavaScript source, ASCII text, with very long lines (1787)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 2023-11-25 15:25:57.066614221 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 2023-11-25 15:25:57.066614221 +0100 @@ -129,13 +129,13 @@

    Detailed Description

    template<int dim, int spacedim>
    -class NonMatching::internal::QuadratureGeneratorImplementation::UpThroughDimensionCreator< dim, spacedim >

    This class is responsible for creating quadrature points for the $dim$-dimensional quadrature partitioning from an $(dim - 1)$-dimensional "indefinite" quadrature (see QPartitioning documentation).

    -

    To be precise, let $[L, R]$ be the extents of the box in the height function direction and let $I \subset \mathbb{R}^{dim-1}$ be the lower dimensional indefinite region. This class will create quadrature points over $I \times [L, R] \subset \mathbb{R}^{dim}$ and in the case $dim=spacedim$, points for the surface quadrature.

    -

    For each lower dimensional quadrature point, $(x_I, w_I)$ in the indefinite quadrature, we create several 1d-level set functions by restricting $\psi_j$ to $x_I$. We then partition the interval $[L, R]$ into $[y_0, y_1, ..., y_n]$, where $y_0 = L$, $y_n = R$, and the remaining $y_i$ are the roots of the 1d-level set functions in $[L, R]$. Since the level set functions change sign between the roots, each interval belong to different regions in the quadrature partitioning.

    -

    In each interval, $[y_i, y_{i+1}]$, we distribute points according to the 1d-base quadrature, $(x_q, w_q)$ and take the cartesian product with $(x_I, w_I)$ to create the $dim$-dimensional quadrature points, $(X_q, W_q)$: $X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q)$, $W_q = w_I (y_{i+1} - y_i) w_q$.

    -

    When $dim=spacedim$, we have a single level set function, $\psi$. Since we have fulfilled the implicit function theorem, there is a single root $y_1 \in [L, R]$. The point, $x_s = x_I \times y_1$, will be added as a point in the surface quadrature. One can show that the correct weight of this point is

    -

    $w_s = \frac{\|\nabla \psi(x_s)\|}{|\partial_i \psi(x_s)|} w_I$,

    -

    where $i$ is the height function direction.

    +class NonMatching::internal::QuadratureGeneratorImplementation::UpThroughDimensionCreator< dim, spacedim >

    This class is responsible for creating quadrature points for the $dim$-dimensional quadrature partitioning from an $(dim - 1)$-dimensional "indefinite" quadrature (see QPartitioning documentation).

    +

    To be precise, let $[L, R]$ be the extents of the box in the height function direction and let $I \subset \mathbb{R}^{dim-1}$ be the lower dimensional indefinite region. This class will create quadrature points over $I \times [L, R] \subset \mathbb{R}^{dim}$ and in the case $dim=spacedim$, points for the surface quadrature.

    +

    For each lower dimensional quadrature point, $(x_I, w_I)$ in the indefinite quadrature, we create several 1d-level set functions by restricting $\psi_j$ to $x_I$. We then partition the interval $[L, R]$ into $[y_0, y_1, ..., y_n]$, where $y_0 = L$, $y_n = R$, and the remaining $y_i$ are the roots of the 1d-level set functions in $[L, R]$. Since the level set functions change sign between the roots, each interval belong to different regions in the quadrature partitioning.

    +

    In each interval, $[y_i, y_{i+1}]$, we distribute points according to the 1d-base quadrature, $(x_q, w_q)$ and take the cartesian product with $(x_I, w_I)$ to create the $dim$-dimensional quadrature points, $(X_q, W_q)$: $X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q)$, $W_q = w_I (y_{i+1} - y_i) w_q$.

    +

    When $dim=spacedim$, we have a single level set function, $\psi$. Since we have fulfilled the implicit function theorem, there is a single root $y_1 \in [L, R]$. The point, $x_s = x_I \times y_1$, will be added as a point in the surface quadrature. One can show that the correct weight of this point is

    +

    $w_s = \frac{\|\nabla \psi(x_s)\|}{|\partial_i \psi(x_s)|} w_I$,

    +

    where $i$ is the height function direction.

    Definition at line 828 of file quadrature_generator.h.

    Constructor & Destructor Documentation

    @@ -211,7 +211,7 @@
    -

    Create $dim$-dimensional immersed quadratures from the incoming $(dim-1)$-dimensional quadratures and add these to q_partitioning.

    +

    Create $dim$-dimensional immersed quadratures from the incoming $(dim-1)$-dimensional quadratures and add these to q_partitioning.

    Definition at line 725 of file quadrature_generator.cc.

    @@ -294,7 +294,7 @@

    Create a surface quadrature point from the lower-dimensional point and add it to surface_quadrature.

    -

    This function is only called when $dim=spacedim$ and there is a single level set function. At this point there should only be a single root in the interval $[L, R]$

    +

    This function is only called when $dim=spacedim$ and there is a single level set function. At this point there should only be a single root in the interval $[L, R]$

    Definition at line 781 of file quadrature_generator.cc.

    @@ -403,7 +403,7 @@
    -

    1d-functions, that are restrictions of each dim-dimensional level set function passed to generate() to some $(dim-1)$-dimensional point.

    +

    1d-functions, that are restrictions of each dim-dimensional level set function passed to generate() to some $(dim-1)$-dimensional point.

    Definition at line 898 of file quadrature_generator.h.

    @@ -457,7 +457,7 @@
    -

    The roots of the functions in point_restrictions. This will be the values of the height functions, $\{H_i(x_I)\}$ at some lower dimensional quadrature point, $x_I \in \mathbb{R}^{dim-1}$.

    +

    The roots of the functions in point_restrictions. This will be the values of the height functions, $\{H_i(x_I)\}$ at some lower dimensional quadrature point, $x_I \in \mathbb{R}^{dim-1}$.

    Definition at line 911 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonlinearSolverSelector.html differs (JavaScript source, ASCII text, with very long lines (607)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonlinearSolverSelector.html 2023-11-25 15:25:57.083280550 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonlinearSolverSelector.html 2023-11-25 15:25:57.083280550 +0100 @@ -538,11 +538,11 @@

    A function object that users may supply and that is intended to prepare the linear solver for subsequent calls to solve_jacobian_system().

    The job of setup_jacobian() is to prepare the linear solver for subsequent calls to solve_with_jacobian(), in the solution of linear systems $Ax = b$. The exact nature of this system depends on the SolutionStrategy that has been selected.

    -

    In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial
-F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$.

    +

    In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial
+F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$.

    Parameters
    - +
    current_uCurrent value of $u$
    current_uCurrent value of $u$
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html differs (JavaScript source, ASCII text, with very long lines (1630)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 2023-11-25 15:25:57.093280347 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 2023-11-25 15:25:57.093280347 +0100 @@ -361,7 +361,7 @@
    -

    Relative $l_2$ tolerance of the residual to be reached.

    +

    Relative $l_2$ tolerance of the residual to be reached.

    Note
    Solver terminates successfully if either the function tolerance or the relative tolerance has been reached.

    Definition at line 186 of file nonlinear.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html differs (JavaScript source, ASCII text, with very long lines (949)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 2023-11-25 15:25:57.116613206 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 2023-11-25 15:25:57.116613206 +0100 @@ -605,7 +605,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -644,24 +644,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -670,11 +670,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html differs (JavaScript source, ASCII text, with very long lines (899)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 2023-11-25 15:25:57.136612799 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 2023-11-25 15:25:57.136612799 +0100 @@ -540,7 +540,7 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    +

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    Note
    If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, by overloading the project_to_manifold() function.
    Parameters
    @@ -549,7 +549,7 @@
    -
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.
    +
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, spacedim >.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html differs (JavaScript source, ASCII text, with very long lines (897)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2023-11-25 15:25:57.159945657 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2023-11-25 15:25:57.156612392 +0100 @@ -452,7 +452,7 @@
    -

    Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the uv coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the uv coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function.

    Refer to the general documentation of this class for more information.

    @@ -666,7 +666,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -705,24 +705,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -731,11 +731,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html differs (JavaScript source, ASCII text, with very long lines (899)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 2023-11-25 15:25:57.179945250 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 2023-11-25 15:25:57.179945250 +0100 @@ -534,7 +534,7 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    +

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    Note
    If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, by overloading the project_to_manifold() function.
    Parameters
    @@ -543,7 +543,7 @@
    -
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.
    +
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, spacedim >.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html differs (JavaScript source, ASCII text, with very long lines (899)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 2023-11-25 15:25:57.203278106 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 2023-11-25 15:25:57.203278106 +0100 @@ -534,7 +534,7 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    +

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    Note
    If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, by overloading the project_to_manifold() function.
    Parameters
    @@ -543,7 +543,7 @@
    -
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.
    +
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, spacedim >.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPArpackSolver.html differs (JavaScript source, ASCII text, with very long lines (1026)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPArpackSolver.html 2023-11-25 15:25:57.223277702 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPArpackSolver.html 2023-11-25 15:25:57.223277702 +0100 @@ -272,7 +272,7 @@

    Detailed Description

    template<typename VectorType>
    class PArpackSolver< VectorType >

    Interface for using PARPACK. PARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. Here we interface to the routines pdneupd, pdseupd, pdnaupd, pdsaupd of PARPACK. The package is designed to compute a few eigenvalues and corresponding eigenvectors of a general n by n matrix A. It is most appropriate for large sparse matrices A.

    -

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    +

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    The ArpackSolver can be used in application codes in the following way:

    const unsigned int num_arnoldi_vectors = 2*size_of_spectrum + 2;
    @@ -297,7 +297,7 @@
    const AdditionalData additional_data
    -

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells PARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector of objects of type V that will contain the eigenvectors computed.

    +

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells PARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector of objects of type V that will contain the eigenvectors computed.

    Currently, only three modes of (P)Arpack are implemented. In mode 3 (default), OP is an inverse operation for the matrix A - sigma * B, where sigma is a shift value, set to zero by default. Whereas in mode 2, OP is an inverse of M. Finally, mode 1 corresponds to standard eigenvalue problem without spectral transformation $Ax=\lambda x$. The mode can be specified via AdditionalData object. Note that for shift-and-invert (mode=3), the sought eigenpairs are those after the spectral transformation is applied.

    The OP can be specified by using a LinearOperator:

    const double shift = 5.0;
    const auto op_A = linear_operator<vector_t>(A);
    @@ -647,7 +647,7 @@
    -

    Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling the pd(n/s)eupd and pd(n/s)aupd functions of PARPACK.

    +

    Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling the pd(n/s)eupd and pd(n/s)aupd functions of PARPACK.

    In mode=3, inverse should correspond to $[A-\sigma B]^{-1}$, whereas in mode=2 it should represent $B^{-1}$. For mode=1 both B and inverse are ignored.

    Definition at line 770 of file parpack_solver.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html differs (JavaScript source, ASCII text, with very long lines (696)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2023-11-25 15:25:57.236610763 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2023-11-25 15:25:57.236610763 +0100 @@ -318,7 +318,7 @@

    Reinitialization that takes the number of locally-owned degrees of freedom local_size and an index set for the required ghost indices ghost_indices.

    -

    The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

    +

    The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

    The export_to_ghost_array will populate an array containing values from locally-owned AND ghost indices, as for the relevant set of dofs of a usual FEM simulation.

    Definition at line 46 of file petsc_communication_pattern.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html differs (JavaScript source, ASCII text, with very long lines (829)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 2023-11-25 15:25:57.259943620 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 2023-11-25 15:25:57.259943620 +0100 @@ -1682,8 +1682,8 @@
    -

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -1711,8 +1711,8 @@
    -

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -1768,7 +1768,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -1808,7 +1808,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    @@ -2315,7 +2315,7 @@
    -

    Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    +

    Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 694 of file petsc_matrix_base.cc.

    @@ -2344,7 +2344,7 @@
    -

    Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    +

    Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 704 of file petsc_matrix_base.cc.

    @@ -2593,9 +2593,9 @@
    -

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    -

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    -

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    +

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    +

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    +

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 638 of file petsc_matrix_base.cc.

    @@ -2637,9 +2637,9 @@
    -

    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    -

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    -

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    +

    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    +

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    +

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 646 of file petsc_matrix_base.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html differs (JavaScript source, ASCII text, with very long lines (854)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 2023-11-25 15:25:57.289943009 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 2023-11-25 15:25:57.289943009 +0100 @@ -906,7 +906,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 410 of file petsc_block_sparse_matrix.h.

    @@ -1054,7 +1054,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 441 of file petsc_block_sparse_matrix.h.

    @@ -2295,7 +2295,7 @@
    -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -2420,7 +2420,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -2941,7 +2941,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -3089,7 +3089,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html differs (JavaScript source, ASCII text, with very long lines (990)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 2023-11-25 15:25:57.319942401 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 2023-11-25 15:25:57.319942401 +0100 @@ -2055,7 +2055,7 @@
    -

    Return the square of the $l_2$-norm.

    +

    Return the square of the $l_2$-norm.

    @@ -2107,7 +2107,7 @@
    -

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    +

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -2133,7 +2133,7 @@
    -

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    +

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -2159,7 +2159,7 @@
    -

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    +

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html differs (JavaScript source, ASCII text, with very long lines (906)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2023-11-25 15:25:57.346608523 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2023-11-25 15:25:57.346608523 +0100 @@ -863,7 +863,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    @@ -894,7 +894,7 @@
    -

    Compute the matrix scalar product $\left(u^\ast,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u^\ast,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Definition at line 815 of file petsc_parallel_sparse_matrix.cc.

    @@ -961,7 +961,7 @@ const MPI::Vector &&#href_anchor"paramname">V = MPI::Vector()&#href_anchor"memdoc"> -

    Perform the matrix-matrix multiplication $C = AB$, or, $C = A \text{diag}(V) B$ given a compatible vector $V$.

    +

    Perform the matrix-matrix multiplication $C = AB$, or, $C = A \text{diag}(V) B$ given a compatible vector $V$.

    This function calls MatrixBase::mmult() to do the actual work.

    Definition at line 876 of file petsc_parallel_sparse_matrix.cc.

    @@ -988,7 +988,7 @@ const MPI::Vector &&#href_anchor"paramname">V = MPI::Vector()&#href_anchor"memdoc"> -

    Perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, $C = A^T \text{diag}(V) B$ given a compatible vector $V$.

    +

    Perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, $C = A^T \text{diag}(V) B$ given a compatible vector $V$.

    This function calls MatrixBase::Tmmult() to do the actual work.

    Definition at line 887 of file petsc_parallel_sparse_matrix.cc.

    @@ -2305,8 +2305,8 @@
    -

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -2334,8 +2334,8 @@
    -

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -2391,7 +2391,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -2431,7 +2431,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    @@ -2938,7 +2938,7 @@
    -

    Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    +

    Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 694 of file petsc_matrix_base.cc.

    @@ -2967,7 +2967,7 @@
    -

    Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    +

    Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 704 of file petsc_matrix_base.cc.

    @@ -3216,9 +3216,9 @@
    -

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    -

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    -

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    +

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    +

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    +

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 638 of file petsc_matrix_base.cc.

    @@ -3260,9 +3260,9 @@
    -

    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    -

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    -

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    +

    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    +

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    +

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 646 of file petsc_matrix_base.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html differs (JavaScript source, ASCII text, with very long lines (1766)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 2023-11-25 15:25:57.376607912 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 2023-11-25 15:25:57.376607912 +0100 @@ -2103,7 +2103,7 @@
    -

    Return the square of the $l_2$-norm.

    +

    Return the square of the $l_2$-norm.

    Definition at line 612 of file petsc_vector_base.cc.

    @@ -2159,7 +2159,7 @@
    -

    $l_1$-norm of the vector. The sum of the absolute values.

    +

    $l_1$-norm of the vector. The sum of the absolute values.

    Note
    In complex-valued PETSc priori to 3.7.0 this norm is implemented as the sum of absolute values of real and imaginary parts of elements of a complex vector.

    Definition at line 672 of file petsc_vector_base.cc.

    @@ -2188,7 +2188,7 @@
    -

    $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

    +

    $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

    Definition at line 685 of file petsc_vector_base.cc.

    @@ -2217,7 +2217,7 @@
    -

    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    +

    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    Definition at line 698 of file petsc_vector_base.cc.

    @@ -2245,7 +2245,7 @@
    -

    $l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

    +

    $l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

    Definition at line 740 of file petsc_vector_base.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html differs (JavaScript source, ASCII text, with very long lines (844)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 2023-11-25 15:25:57.399940773 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 2023-11-25 15:25:57.399940773 +0100 @@ -1431,8 +1431,8 @@
    -

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -1452,8 +1452,8 @@
    -

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -1493,7 +1493,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -1525,7 +1525,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    @@ -1933,7 +1933,7 @@
    -

    Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    +

    Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 694 of file petsc_matrix_base.cc.

    @@ -1954,7 +1954,7 @@
    -

    Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    +

    Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 704 of file petsc_matrix_base.cc.

    @@ -2179,9 +2179,9 @@
    -

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    -

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    -

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    +

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    +

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    +

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 638 of file petsc_matrix_base.cc.

    @@ -2223,9 +2223,9 @@
    -

    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    -

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    -

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    +

    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    +

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    +

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 646 of file petsc_matrix_base.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html differs (JavaScript source, ASCII text, with very long lines (986)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 2023-11-25 15:25:57.429940162 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 2023-11-25 15:25:57.429940162 +0100 @@ -2235,8 +2235,8 @@
    -

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -2264,8 +2264,8 @@
    -

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -2321,7 +2321,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -2361,7 +2361,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    @@ -2712,7 +2712,7 @@
    -

    Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    +

    Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 694 of file petsc_matrix_base.cc.

    @@ -2741,7 +2741,7 @@
    -

    Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    +

    Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 704 of file petsc_matrix_base.cc.

    @@ -2990,9 +2990,9 @@
    -

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    -

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    -

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    +

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    +

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    +

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 638 of file petsc_matrix_base.cc.

    @@ -3034,9 +3034,9 @@
    -

    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    -

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    -

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    +

    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    +

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    +

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 646 of file petsc_matrix_base.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html differs (JavaScript source, ASCII text, with very long lines (553)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 2023-11-25 15:25:57.446606490 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 2023-11-25 15:25:57.446606490 +0100 @@ -162,7 +162,7 @@

    Detailed Description

    template<typename VectorType = PETScWrappers::VectorBase, typename PMatrixType = PETScWrappers::MatrixBase, typename AMatrixType = PMatrixType>
    class PETScWrappers::NonlinearSolver< VectorType, PMatrixType, AMatrixType >

    Interface to PETSc SNES solver for nonlinear equations. The SNES solver is described in the PETSc manual.

    -

    This class solves the nonlinear system of algebraic equations $F(x) = 0$.

    +

    This class solves the nonlinear system of algebraic equations $F(x) = 0$.

    The interface to PETSc is realized by means of std::function callbacks like in the TrilinosWrappers::NOXSolver and SUNDIALS::KINSOL classes.

    NonlinearSolver supports any vector and matrix type having constructors and methods:

    class VectorType : public Subscriptor
    @@ -179,7 +179,7 @@
    Mat & petsc_matrix();
    ...

    In particular, the supported types are the ones that can wrap PETSc's native vector and matrix classes, that are able to modify them in place, and that can return PETSc native types when requested.

    -

    To use the solvers the user needs to provide the implementation of $F$ via the NonlinearSolver::residual callback.

    +

    To use the solvers the user needs to provide the implementation of $F$ via the NonlinearSolver::residual callback.

    The default linearization procedure of a solver instantiated with this class consists in using Jacobian-Free-Newton-Krylov; the action of tangent matrices inside a linear solver process are approximated via matrix-free finite-differencing of the nonlinear residual equations. For details, consult the PETSc manual.

    In alternative, users can also provide the implementation of the Jacobian. This can be accomplished in two ways:

    -

    Solve the nonlinear system of equations $F(x) = 0$.

    +

    Solve the nonlinear system of equations $F(x) = 0$.

    This function returns the number of iterations. The vector x must contain the initial guess. Upon returning, the x vector contains the solution.

    @@ -473,7 +473,7 @@
    -

    Solve the nonlinear system of equations $F(x) = 0$.

    +

    Solve the nonlinear system of equations $F(x) = 0$.

    This function returns the number of iterations. The vector x must contain the initial guess. Upon returning, the x vector contains the solution.

    Here we also set the matrix to precondition the tangent system.

    @@ -511,7 +511,7 @@
    -

    Solve the nonlinear system of equations $F(x) = 0$.

    +

    Solve the nonlinear system of equations $F(x) = 0$.

    This function returns the number of iterations. The vector x must contain the initial guess. Upon returning, the x vector contains the solution.

    Here we also set the matrices to describe and precondition the tangent system.

    @@ -531,7 +531,7 @@
    -

    Callback for the computation of the nonlinear residual $F(x)$.

    +

    Callback for the computation of the nonlinear residual $F(x)$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.

    Definition at line 374 of file petsc_snes.h.

    @@ -551,7 +551,7 @@
    -

    Callback for the computation of the Jacobian $\dfrac{\partial F}{\partial x}$.

    +

    Callback for the computation of the Jacobian $\dfrac{\partial F}{\partial x}$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.

    Definition at line 386 of file petsc_snes.h.

    @@ -593,7 +593,7 @@

    Callback to set up the Jacobian system.

    -

    This callback gives full control to users to set up the tangent operator $\dfrac{\partial F}{\partial x}$.

    +

    This callback gives full control to users to set up the tangent operator $\dfrac{\partial F}{\partial x}$.

    Solvers must be provided via NonlinearSolver::solve_with_jacobian.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.
    @@ -636,7 +636,7 @@

    Callback for the computation of the energy function.

    -

    This is usually not needed, since by default SNES assumes that the objective function to be minimized is $\frac{1}{2} || F(x) ||^2 $.

    +

    This is usually not needed, since by default SNES assumes that the objective function to be minimized is $\frac{1}{2} || F(x) ||^2 $.

    However, if the nonlinear equations are derived from energy arguments, it may be useful to use this callback to perform linesearch or to test for the reduction in a trust region step.

    The value of the energy function must be returned in energy_value.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html differs (JavaScript source, ASCII text, with very long lines (949)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 2023-11-25 15:25:57.473272612 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 2023-11-25 15:25:57.473272612 +0100 @@ -890,7 +890,7 @@ const MPI::Vector &&#href_anchor"paramname">V = MPI::Vector()&#href_anchor"memdoc"> -

    Perform the matrix-matrix multiplication $C = AB$, or, $C = A \text{diag}(V) B$ given a compatible vector $V$.

    +

    Perform the matrix-matrix multiplication $C = AB$, or, $C = A \text{diag}(V) B$ given a compatible vector $V$.

    This function calls MatrixBase::mmult() to do the actual work.

    Definition at line 258 of file petsc_sparse_matrix.cc.

    @@ -917,7 +917,7 @@ const MPI::Vector &&#href_anchor"paramname">V = MPI::Vector()&#href_anchor"memdoc"> -

    Perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, $C = A^T \text{diag}(V) B$ given a compatible vector $V$.

    +

    Perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, $C = A^T \text{diag}(V) B$ given a compatible vector $V$.

    This function calls MatrixBase::Tmmult() to do the actual work.

    Definition at line 269 of file petsc_sparse_matrix.cc.

    @@ -2199,8 +2199,8 @@
    -

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -2228,8 +2228,8 @@
    -

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -2285,7 +2285,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -2325,7 +2325,7 @@
    -

    Compute the matrix scalar product $\left(u,Mv\right)$.

    +

    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    @@ -2832,7 +2832,7 @@
    -

    Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    +

    Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 694 of file petsc_matrix_base.cc.

    @@ -2861,7 +2861,7 @@
    -

    Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    +

    Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 704 of file petsc_matrix_base.cc.

    @@ -3110,9 +3110,9 @@
    -

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    -

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    -

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    +

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    +

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    +

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 638 of file petsc_matrix_base.cc.

    @@ -3154,9 +3154,9 @@
    -

    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    -

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    -

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    +

    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    +

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    +

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 646 of file petsc_matrix_base.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html differs (JavaScript source, ASCII text, with very long lines (793)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 2023-11-25 15:25:57.493272205 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 2023-11-25 15:25:57.489938941 +0100 @@ -176,20 +176,20 @@ requires ( (concepts::is_dealii_petsc_vector_type<VectorType> || std::constructible_from< VectorType, Vec>)&&(concepts::is_dealii_petsc_matrix_type<PMatrixType> || std::constructible_from< PMatrixType, Mat>)&&(concepts::is_dealii_petsc_matrix_type<AMatrixType> || std::constructible_from<AMatrixType, Mat>))
    class PETScWrappers::TimeStepper< VectorType, PMatrixType, AMatrixType >

    Interface to the PETSc TS solver for Ordinary Differential Equations and Differential-Algebraic Equations. The TS solver is described in the PETSc manual.

    This class supports two kinds of formulations. The explicit formulation:

    -\[
+<picture><source srcset=\[
   \begin{cases}
       \dot y = G(t,y)\, , \\
       y(t_0) = y_0\, , \\
   \end{cases}
-\] +\]" src="form_1737.png"/>

    and the implicit formulation:

    -\[
+<picture><source srcset=\[
   \begin{cases}
       F(t,y,\dot y) = 0\, , \\
       y(t_0) = y_0\, . \\
   \end{cases}
-\] +\]" src="form_1738.png"/>

    The interface to PETSc is realized by means of std::function callbacks like in the SUNDIALS::IDA and SUNDIALS::ARKode classes.

    TimeStepper supports any vector and matrix type having constructors and methods:

    @@ -207,7 +207,7 @@
    Mat & petsc_matrix();
    ...

    In particular, the supported types are the ones that can wrap PETSc's native vector and matrix classes, that are able to modify them in place, and that can return PETSc native types when requested.

    -

    To use explicit solvers (like for example explicit Runge-Kutta methods), the user only needs to provide the implementation of $G$ via the TimeStepper::explicit_function. For implicit solvers, users have also the alternative of providing the $F$ function via TimeStepper::implicit_function. IMEX methods are also supported by providing both callbacks.

    +

    To use explicit solvers (like for example explicit Runge-Kutta methods), the user only needs to provide the implementation of $G$ via the TimeStepper::explicit_function. For implicit solvers, users have also the alternative of providing the $F$ function via TimeStepper::implicit_function. IMEX methods are also supported by providing both callbacks.

    The default linearization procedure of an implicit solver instantiated with this class consists in using Jacobian-Free-Newton-Krylov; the action of tangent matrices inside a linear solver process are approximated via matrix-free finite-differencing of the nonlinear residual equations that are ODE-solver specific. For details, consult the PETSc manual.

    In alternative, users can also provide the implementations of the Jacobians. This can be accomplished in two ways:

    Definition at line 3035 of file tensor.h.

    @@ -2490,7 +2490,7 @@
    -

    Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

    +

    Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

    Definition at line 3061 of file tensor.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolarManifold.html differs (JavaScript source, ASCII text, with very long lines (1036)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolarManifold.html 2023-11-25 15:25:57.693268137 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolarManifold.html 2023-11-25 15:25:57.693268137 +0100 @@ -439,7 +439,7 @@
    -

    Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the polar coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{spacedim}$.

    +

    Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the polar coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{spacedim}$.

    This function is used in the computations required by the get_tangent_vector() function.

    Refer to the general documentation of this class for more information.

    @@ -713,7 +713,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -756,24 +756,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1440.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1441.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -782,11 +782,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1442.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsBernardiRaugel.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1174)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsBernardiRaugel.html 2023-11-25 15:25:57.706601197 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsBernardiRaugel.html 2023-11-25 15:25:57.706601197 +0100 @@ -142,7 +142,7 @@
    template<int dim>
    class PolynomialsBernardiRaugel< dim >

    This class implements the Bernardi-Raugel polynomials similarly to the description in the Mathematics of Computation paper from 1985 by Christine Bernardi and Geneviève Raugel.

    The Bernardi-Raugel polynomials are originally defined as an enrichment of the $(P_1)^d$ elements on simplicial meshes for Stokes problems by the addition of bubble functions, yielding a locking-free finite element which is a subset of $(P_2)^d$ elements. This implementation is an enrichment of $(Q_1)^d$ elements which is a subset of $(Q_2)^d$ elements for quadrilateral and hexahedral meshes.

    -

    The $BR_1$ bubble functions are defined to have magnitude 1 at the center of face $e_i$ and direction $\mathbf{n}_i$ normal to face $e_i$, and magnitude 0 on all other vertices and faces. Ordering is consistent with the face numbering in GeometryInfo. The vector $\mathbf{n}_i$ points in the positive axis direction and not necessarily normal to the element for consistent orientation across edges.

    +

    The $BR_1$ bubble functions are defined to have magnitude 1 at the center of face $e_i$ and direction $\mathbf{n}_i$ normal to face $e_i$, and magnitude 0 on all other vertices and faces. Ordering is consistent with the face numbering in GeometryInfo. The vector $\mathbf{n}_i$ points in the positive axis direction and not necessarily normal to the element for consistent orientation across edges.

    2d bubble functions (in order)
    $x=0$ edge: $\mathbf{p}_1 = \mathbf{n}_1 (1-x)(y)(1-y)$
     @f$x=1@f$ edge: @f$\mathbf{p}_2 = \mathbf{n}_2 (x)(y)(1-y)@f$
     
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsBernstein.html differs (JavaScript source, ASCII text, with very long lines (475))
    --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsBernstein.html	2023-11-25 15:25:57.723267526 +0100
    +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsBernstein.html	2023-11-25 15:25:57.726600789 +0100
    @@ -1314,7 +1314,7 @@
       
     
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsRT__Bubbles.html differs (JavaScript source, ASCII text, with very long lines (965)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsRT__Bubbles.html 2023-11-25 15:25:57.739933854 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomialsRT__Bubbles.html 2023-11-25 15:25:57.739933854 +0100 @@ -138,18 +138,18 @@

    This space is of the form Vk = RTk-1 + Bk, where Bk is defined as follows:

    In 2d:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
  B_k^1(E) = \text{span}\left\{x^{a_1-1} y^{a_2}\begin{pmatrix} (a_2+1) x \\
    -a_1 y \end{pmatrix}\text{ : } a_2=k \right\} \\
  B_k^2(E) = \text{span}\left\{x^{b_1} y^{b_2-1}\begin{pmatrix} -b_2 x \\
     (b_1+1) y \end{pmatrix}\text{ : } b_1=k \right\}
-\end{align*} +\end{align*}" src="form_706.png"/>

    In 3d:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   B_k^1(E) = \text{span}\left\{x^{a_1-1} y^{a_2} z^{a_3}\begin{pmatrix}
 (a_2+a_3+2) x \\
     -a_1 y \\ -a_1 z \end{pmatrix}\text{ : } a_2=k \text{ or } a_3=k
@@ -161,11 +161,11 @@
   B_k^3(E) = \text{span}\left\{x^{c_1}y^{c_2}z^{c_3-1}\begin{pmatrix} -c_3 x
 \\ -c_3y \\ (c_1+c_2+2)z \end{pmatrix}\text{ : } c_1=k \text{ or } c_2=k
 \right\},
- \end{align*} + \end{align*}" src="form_707.png"/>

    -

    where $0 \le a_1, a_2, a_3 \le k$.

    +

    where $0 \le a_1, a_2, a_3 \le k$.

    Note
    Unlike the classical Raviart-Thomas space, the lowest order for the enhanced space is 1, similarly to the Brezzi-Douglas-Marini (BDM) polynomial space.

    The total dimension of the space dim(Vk) = d*(k+1)^d, where d is the space dimension. This allows to associate shape functions with the Gauss-Lobatto quadrature points as shown in the figures below.

    @@ -176,7 +176,7 @@

    - +
    Left - $2d,\,k=3$, right - $3d,\,k=2$.
    Left - $2d,\,k=3$, right - $3d,\,k=2$.

    Definition at line 91 of file polynomials_rt_bubbles.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html differs (JavaScript source, ASCII text, with very long lines (541)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 2023-11-25 15:25:57.756600182 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 2023-11-25 15:25:57.756600182 +0100 @@ -1258,7 +1258,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html differs (JavaScript source, ASCII text, with very long lines (6544)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 2023-11-25 15:25:57.773266507 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 2023-11-25 15:25:57.773266507 +0100 @@ -1282,7 +1282,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Hierarchical.html differs (JavaScript source, ASCII text, with very long lines (715)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 2023-11-25 15:25:57.793266100 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 2023-11-25 15:25:57.793266100 +0100 @@ -1345,7 +1345,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html differs (JavaScript source, ASCII text, with very long lines (838)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 2023-11-25 15:25:57.809932429 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 2023-11-25 15:25:57.809932429 +0100 @@ -1305,7 +1305,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Legendre.html differs (JavaScript source, ASCII text, with very long lines (811)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Legendre.html 2023-11-25 15:25:57.826598757 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Legendre.html 2023-11-25 15:25:57.826598757 +0100 @@ -1248,7 +1248,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Lobatto.html differs (JavaScript source, ASCII text, with very long lines (1908)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Lobatto.html 2023-11-25 15:25:57.846598350 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Lobatto.html 2023-11-25 15:25:57.846598350 +0100 @@ -218,7 +218,7 @@

    Detailed Description

    Lobatto polynomials of arbitrary degree on [0,1].

    -

    These polynomials are the integrated Legendre polynomials on [0,1]. The first two polynomials are the standard linear shape functions given by $l_0(x) = 1-x$ and $l_1(x) = x$. For $i\geq2$ we use the definition $l_i(x) = \frac{1}{\Vert L_{i-1}\Vert_2}\int_0^x L_{i-1}(t)\,dt$, where $L_i$ denotes the $i$-th Legendre polynomial on $[0,1]$. The Lobatto polynomials $l_0,\ldots,l_k$ form a complete basis of the polynomials space of degree $k$.

    +

    These polynomials are the integrated Legendre polynomials on [0,1]. The first two polynomials are the standard linear shape functions given by $l_0(x) = 1-x$ and $l_1(x) = x$. For $i\geq2$ we use the definition $l_i(x) = \frac{1}{\Vert L_{i-1}\Vert_2}\int_0^x L_{i-1}(t)\,dt$, where $L_i$ denotes the $i$-th Legendre polynomial on $[0,1]$. The Lobatto polynomials $l_0,\ldots,l_k$ form a complete basis of the polynomials space of degree $k$.

    Calling the constructor with a given index k will generate the polynomial with index k. But only for $k\geq 1$ the index equals the degree of the polynomial. For k==0 also a polynomial of degree 1 is generated.

    These polynomials are used for the construction of the shape functions of Nédélec elements of arbitrary order.

    @@ -1278,7 +1278,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Monomial.html differs (JavaScript source, ASCII text, with very long lines (534)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Monomial.html 2023-11-25 15:25:57.863264678 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Monomial.html 2023-11-25 15:25:57.863264678 +0100 @@ -1370,7 +1370,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1PiecewisePolynomial.html differs (JavaScript source, ASCII text, with very long lines (908)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1PiecewisePolynomial.html 2023-11-25 15:25:57.879931003 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1PiecewisePolynomial.html 2023-11-25 15:25:57.879931003 +0100 @@ -192,7 +192,7 @@

    Detailed Description

    template<typename number>
    -class Polynomials::PiecewisePolynomial< number >

    Definition of piecewise 1d polynomials for the unit interval. This space allows the description of interpolating polynomials on parts of the unit interval, similarly to the definition of finite element basis functions on subdivided elements. The primary purpose of this class is to allow constructing the shape functions of the FE_Q_iso_Q1 class that has a number of interpolation points in each coordinate direction, but instead of using them for higher-order polynomials just chooses piecewise linear shape functions – in effect, it is a $Q_1$ element defined on a subdivision of the reference cell, and replicated on each of these sub-cells.

    +class Polynomials::PiecewisePolynomial< number >

    Definition of piecewise 1d polynomials for the unit interval. This space allows the description of interpolating polynomials on parts of the unit interval, similarly to the definition of finite element basis functions on subdivided elements. The primary purpose of this class is to allow constructing the shape functions of the FE_Q_iso_Q1 class that has a number of interpolation points in each coordinate direction, but instead of using them for higher-order polynomials just chooses piecewise linear shape functions – in effect, it is a $Q_1$ element defined on a subdivision of the reference cell, and replicated on each of these sub-cells.

    This class is not derived from the ScalarPolynomialsBase base class because it is not actually a polynomial – it is a piecewise polynomial. However, it is interface-compatible with the Polynomials::Polynomial class, and consequently can be used as template argument for TensorProductPolynomials.

    Definition at line 64 of file polynomials_piecewise.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Polynomial.html differs (JavaScript source, ASCII text, with very long lines (761)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Polynomial.html 2023-11-25 15:25:57.896597332 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1Polynomial.html 2023-11-25 15:25:57.896597332 +0100 @@ -1282,7 +1282,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html differs (JavaScript source, ASCII text, with very long lines (3111)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 2023-11-25 15:25:57.916596925 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 2023-11-25 15:25:57.916596925 +0100 @@ -225,8 +225,8 @@  

    Detailed Description

    -

    This class implements Hermite interpolation polynomials (see [CiarletRiavart1972interpolation]) enforcing the maximum possible level of regularity $r$ in the FEM basis given a polynomial degree of $2r+1$. The polynomials all represent either a non-zero shape value or derivative at $x=0$ and $x=1$ on the reference interval $x \in [0,1]$.

    -

    Indices $j = 0, 1, \dots, r$ refer to polynomials corresponding to a non-zero derivative (or shape value for $j=0$) of order $j$ at $x=0$, and indices $j = r+1, r+2, \dots, 2r+1$ refer to polynomials with a non-zero derivative of order $j-(r+1)$ (or value for $j=r+1$) at $x=1$. In particular, the $0^{th}$ function has a value of $1$ at $x=0$, and the $(r+1)^{th}$ function has a value of $1$ at $x=1$.The basis is rescaled such that a function corresponding to a non-zero $j^{th}$ derivative has derivative value $j! 4^{j}$ at the corresponding node. This is done to prevent the $L^{2}$-norm of the basis functions from reducing exponentially with the chosen regularity.

    +

    This class implements Hermite interpolation polynomials (see [CiarletRiavart1972interpolation]) enforcing the maximum possible level of regularity $r$ in the FEM basis given a polynomial degree of $2r+1$. The polynomials all represent either a non-zero shape value or derivative at $x=0$ and $x=1$ on the reference interval $x \in [0,1]$.

    +

    Indices $j = 0, 1, \dots, r$ refer to polynomials corresponding to a non-zero derivative (or shape value for $j=0$) of order $j$ at $x=0$, and indices $j = r+1, r+2, \dots, 2r+1$ refer to polynomials with a non-zero derivative of order $j-(r+1)$ (or value for $j=r+1$) at $x=1$. In particular, the $0^{th}$ function has a value of $1$ at $x=0$, and the $(r+1)^{th}$ function has a value of $1$ at $x=1$.The basis is rescaled such that a function corresponding to a non-zero $j^{th}$ derivative has derivative value $j! 4^{j}$ at the corresponding node. This is done to prevent the $L^{2}$-norm of the basis functions from reducing exponentially with the chosen regularity.

    Definition at line 61 of file polynomials_hermite.h.

    Member Typedef Documentation

    @@ -304,8 +304,8 @@
    -

    Constructor for an individual Hermite polynomial. We write $f_{j}$ for a polynomial that has a non-zero $j^{th}$ derivative at $x=0$ and $g_{j}$ for a polynomial with a non-zero $j^{th}$ derivative at $x=1$, meaning $f_{j}$ will have index $=j$ and $g_{j}$ will have index $= j + \mathtt{regularity} + 1$. The resulting polynomials will be degree $2\times \mathtt{regularity} +1$ and obey the following conditions:

    -\begin{align*}
+<p>Constructor for an individual Hermite polynomial. We write <picture><source srcset=$f_{j}$ for a polynomial that has a non-zero $j^{th}$ derivative at $x=0$ and $g_{j}$ for a polynomial with a non-zero $j^{th}$ derivative at $x=1$, meaning $f_{j}$ will have index $=j$ and $g_{j}$ will have index $= j + \mathtt{regularity} + 1$. The resulting polynomials will be degree $2\times \mathtt{regularity} +1$ and obey the following conditions:

    +\begin{align*}
 &\begin{matrix}
   \left. \frac{d^{i}}{dx^{i}} f_{j}(x) \right\vert_{x=0}
          = i! 4^{i} \delta_{i, j}, \hfill
@@ -320,18 +320,18 @@
          = i! 4^{i} \delta_{i, j}, \hfill
          &\qquad \hfill 0 \leq i \leq \mathtt{regularity},
 \end{matrix} \qquad 0 \leq j \leq \mathtt{regularity},
-\end{align*} +\end{align*}" src="form_680.png"/>

    -

    where $\delta_{i,j}$ is equal to $1$ whenever $i=j$, and equal to $0$ otherwise. These polynomials have explicit formulas given by

    -\begin{align*}
+<p> where <picture><source srcset=$\delta_{i,j}$ is equal to $1$ whenever $i=j$, and equal to $0$ otherwise. These polynomials have explicit formulas given by

    +\begin{align*}
   f_{j}(x) &= 4^{j} x^{j} (1-x)^{\mathtt{regularity}+1}
 \sum_{k=0}^{\mathtt{regularity} - j} \;^{\mathtt{regularity} + k} C_{k}
 x^{k}, \\ g_{j}(x) &= 4^{j} x^{\mathtt{regularity}+1} (x-1)^{j}
 \sum_{k=0}^{\mathtt{regularity} - j} \;^{\mathtt{regularity} + k} C_{k}
 (1-x)^{k},
-\end{align*} +\end{align*}" src="form_684.png"/>

    -

    where $^{n} C_{r} = \frac{n!}{r!(n-r)!}$ is the $r^{th}$ binomial coefficient of degree $n, \; 0 \leq r \leq n$.

    +

    where $^{n} C_{r} = \frac{n!}{r!(n-r)!}$ is the $r^{th}$ binomial coefficient of degree $n, \; 0 \leq r \leq n$.

    Parameters
    @@ -367,7 +367,7 @@
    regularityThe highest derivative for which the basis is used to enforce regularity.
    -

    This function generates a vector of Polynomial objects representing a complete basis of degree $2\times\mathtt{regularity} +1$ on the reference interval $[0,1]$.

    +

    This function generates a vector of Polynomial objects representing a complete basis of degree $2\times\mathtt{regularity} +1$ on the reference interval $[0,1]$.

    Parameters
    @@ -1279,7 +1279,7 @@
    regularityThe generated basis can be used to strongly enforce continuity in all derivatives up to and including this order.
    -

    The order of the highest derivative in which the Hermite basis can be used to impose continuity across element boundaries. It's related to the degree $p$ by $p = 2 \times\mathtt{regularity} +1$.

    +

    The order of the highest derivative in which the Hermite basis can be used to impose continuity across element boundaries. It's related to the degree $p$ by $p = 2 \times\mathtt{regularity} +1$.

    Definition at line 132 of file polynomials_hermite.h.

    @@ -1329,7 +1329,7 @@
    -

    This stores whether the shape function corresponds to a non-zero value or derivative at $x=0$ on the reference interval ( $\mathtt{side} =0$) or at $x=1$ ( $\mathtt{side} =1$).

    +

    This stores whether the shape function corresponds to a non-zero value or derivative at $x=0$ on the reference interval ( $\mathtt{side} =0$) or at $x=1$ ( $\mathtt{side} =1$).

    Definition at line 145 of file polynomials_hermite.h.

    @@ -1405,7 +1405,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussChebyshev.html differs (JavaScript source, ASCII text, with very long lines (1649)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussChebyshev.html 2023-11-25 15:25:57.933263253 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussChebyshev.html 2023-11-25 15:25:57.933263253 +0100 @@ -196,7 +196,7 @@

    Detailed Description

    template<int dim>
    -class QGaussChebyshev< dim >

    Gauss-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-1$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) =
+class QGaussChebyshev< dim ></div><p>Gauss-Chebyshev quadrature rules integrate the weighted product <picture><source srcset=$\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-1$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) =
 1/\sqrt{x(1-x)}$. For details see: M. Abramowitz & I.A. Stegun: Handbook of Mathematical Functions, par. 25.4.38

    Definition at line 493 of file quadrature_lib.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLobatto.html differs (JavaScript source, ASCII text, with very long lines (719)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLobatto.html 2023-11-25 15:25:57.949929581 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLobatto.html 2023-11-25 15:25:57.949929581 +0100 @@ -197,7 +197,7 @@ class QGaussLobatto< dim >

    The Gauss-Lobatto family of quadrature rules for numerical integration.

    This modification of the Gauss quadrature uses the two interval end points as well. Being exact for polynomials of degree 2n-3, this formula is suboptimal by two degrees.

    The quadrature points are interval end points plus the roots of the derivative of the Legendre polynomial Pn-1 of degree n-1. The quadrature weights are 2/(n(n-1)(Pn-1(xi)2).

    -
    Note
    This implementation has not been optimized concerning numerical stability and efficiency. It can be easily adapted to the general case of Gauss-Lobatto-Jacobi-Bouzitat quadrature with arbitrary parameters $\alpha$, $\beta$, of which the Gauss-Lobatto-Legendre quadrature ( $\alpha
+<dl class=
    Note
    This implementation has not been optimized concerning numerical stability and efficiency. It can be easily adapted to the general case of Gauss-Lobatto-Jacobi-Bouzitat quadrature with arbitrary parameters $\alpha$, $\beta$, of which the Gauss-Lobatto-Legendre quadrature ( $\alpha
 = \beta = 0$) is a special case.
    See also
    http://en.wikipedia.org/wiki/Handbook_of_Mathematical_Functions
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLobattoChebyshev.html differs (JavaScript source, ASCII text, with very long lines (1989)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLobattoChebyshev.html 2023-11-25 15:25:57.963262642 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLobattoChebyshev.html 2023-11-25 15:25:57.963262642 +0100 @@ -196,7 +196,7 @@

    Detailed Description

    template<int dim>
    -class QGaussLobattoChebyshev< dim >

    Gauss-Lobatto-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$, with the additional constraint that two of the quadrature points are located at the endpoints of the quadrature interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-3$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. For details see: M. Abramowitz & I.A. Stegun: Handbook of Mathematical Functions, par. 25.4.40

    +class QGaussLobattoChebyshev< dim >

    Gauss-Lobatto-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$, with the additional constraint that two of the quadrature points are located at the endpoints of the quadrature interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-3$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. For details see: M. Abramowitz & I.A. Stegun: Handbook of Mathematical Functions, par. 25.4.40

    Definition at line 561 of file quadrature_lib.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLog.html differs (JavaScript source, ASCII text, with very long lines (765)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLog.html 2023-11-25 15:25:57.983262235 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLog.html 2023-11-25 15:25:57.983262235 +0100 @@ -202,8 +202,8 @@

    Detailed Description

    template<int dim>
    -class QGaussLog< dim >

    A class for Gauss quadrature with logarithmic weighting function. This formula is used to integrate $\ln|x|\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities. The collection of quadrature points and weights has been obtained using Numerical Recipes.

    -

    Notice that only the function $f(x)$ should be provided, i.e., $\int_0^1
+class QGaussLog< dim ></div><p>A class for Gauss quadrature with logarithmic weighting function. This formula is used to integrate <picture><source srcset=$\ln|x|\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities. The collection of quadrature points and weights has been obtained using Numerical Recipes.

    +

    Notice that only the function $f(x)$ should be provided, i.e., $\int_0^1
 f(x) \ln|x| dx = \sum_{i=0}^N w_i f(q_i)$. Setting the revert flag to true at construction time switches the weight from $\ln|x|$ to $\ln|1-x|$.

    The weights and functions have been tabulated up to order 12.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLogR.html differs (JavaScript source, ASCII text, with very long lines (1263)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLogR.html 2023-11-25 15:25:57.999928563 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussLogR.html 2023-11-25 15:25:57.999928563 +0100 @@ -197,15 +197,15 @@

    Detailed Description

    template<int dim>
    -class QGaussLogR< dim >

    A class for Gauss quadrature with arbitrary logarithmic weighting function. This formula is used to integrate $\ln(|x-x_0|/\alpha)\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities, and $x_0$ and $\alpha$ are given at construction time, and are the location of the singularity $x_0$ and an arbitrary scaling factor in the singularity.

    -

    You have to make sure that the point $x_0$ is not one of the Gauss quadrature points of order $N$, otherwise an exception is thrown, since the quadrature weights cannot be computed correctly.

    +class QGaussLogR< dim >

    A class for Gauss quadrature with arbitrary logarithmic weighting function. This formula is used to integrate $\ln(|x-x_0|/\alpha)\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities, and $x_0$ and $\alpha$ are given at construction time, and are the location of the singularity $x_0$ and an arbitrary scaling factor in the singularity.

    +

    You have to make sure that the point $x_0$ is not one of the Gauss quadrature points of order $N$, otherwise an exception is thrown, since the quadrature weights cannot be computed correctly.

    This quadrature formula is rather expensive, since it uses internally two Gauss quadrature formulas of order n to integrate the nonsingular part of the factor, and two GaussLog quadrature formulas to integrate on the separate segments $[0,x_0]$ and $[x_0,1]$. If the singularity is one of the extremes and the factor alpha is 1, then this quadrature is the same as QGaussLog.

    The last argument from the constructor allows you to use this quadrature rule in one of two possible ways:

    \[ \int_0^1 g(x) dx = \int_0^1 f(x)
 \ln\left(\frac{|x-x_0|}{\alpha}\right) dx = \sum_{i=0}^N w_i g(q_i) =
 \sum_{i=0}^N \bar{w}_i f(q_i) \]

    -

    Which one of the two sets of weights is provided, can be selected by the factor_out_singular_weight parameter. If it is false (the default), then the $\bar{w}_i$ weights are computed, and you should provide only the smooth function $f(x)$, since the singularity is included inside the quadrature. If the parameter is set to true, then the singularity is factored out of the quadrature formula, and you should provide a function $g(x)$, which should at least be similar to $\ln(|x-x_0|/\alpha)$.

    +

    Which one of the two sets of weights is provided, can be selected by the factor_out_singular_weight parameter. If it is false (the default), then the $\bar{w}_i$ weights are computed, and you should provide only the smooth function $f(x)$, since the singularity is included inside the quadrature. If the parameter is set to true, then the singularity is factored out of the quadrature formula, and you should provide a function $g(x)$, which should at least be similar to $\ln(|x-x_0|/\alpha)$.

    Notice that this quadrature rule is worthless if you try to use it for regular functions once you factored out the singularity.

    The weights and functions have been tabulated up to order 12.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussOneOverR.html differs (JavaScript source, ASCII text, with very long lines (1424)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussOneOverR.html 2023-11-25 15:25:58.016594888 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussOneOverR.html 2023-11-25 15:25:58.016594888 +0100 @@ -202,9 +202,9 @@

    Detailed Description

    template<int dim>
    -class QGaussOneOverR< dim >

    A class for Gauss quadrature with $1/R$ weighting function. This formula can be used to integrate $1/R \ f(x)$ on the reference element $[0,1]^2$, where $f$ is a smooth function without singularities, and $R$ is the distance from the point $x$ to the vertex $\xi$, given at construction time by specifying its index. Notice that this distance is evaluated in the reference element.

    -

    This quadrature formula is obtained from two QGauss quadrature formulas, upon transforming them into polar coordinate system centered at the singularity, and then again into another reference element. This allows for the singularity to be cancelled by part of the Jacobian of the transformation, which contains $R$. In practice the reference element is transformed into a triangle by collapsing one of the sides adjacent to the singularity. The Jacobian of this transformation contains $R$, which is removed before scaling the original quadrature, and this process is repeated for the next half element.

    -

    Upon construction it is possible to specify whether we want the singularity removed, or not. In other words, this quadrature can be used to integrate $g(x) = 1/R\ f(x)$, or simply $f(x)$, with the $1/R$ factor already included in the quadrature weights.

    +class QGaussOneOverR< dim >

    A class for Gauss quadrature with $1/R$ weighting function. This formula can be used to integrate $1/R \ f(x)$ on the reference element $[0,1]^2$, where $f$ is a smooth function without singularities, and $R$ is the distance from the point $x$ to the vertex $\xi$, given at construction time by specifying its index. Notice that this distance is evaluated in the reference element.

    +

    This quadrature formula is obtained from two QGauss quadrature formulas, upon transforming them into polar coordinate system centered at the singularity, and then again into another reference element. This allows for the singularity to be cancelled by part of the Jacobian of the transformation, which contains $R$. In practice the reference element is transformed into a triangle by collapsing one of the sides adjacent to the singularity. The Jacobian of this transformation contains $R$, which is removed before scaling the original quadrature, and this process is repeated for the next half element.

    +

    Upon construction it is possible to specify whether we want the singularity removed, or not. In other words, this quadrature can be used to integrate $g(x) = 1/R\ f(x)$, or simply $f(x)$, with the $1/R$ factor already included in the quadrature weights.

    Definition at line 291 of file quadrature_lib.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussRadauChebyshev.html differs (JavaScript source, ASCII text, with very long lines (2028)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussRadauChebyshev.html 2023-11-25 15:25:58.033261217 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQGaussRadauChebyshev.html 2023-11-25 15:25:58.033261217 +0100 @@ -204,7 +204,7 @@

    Detailed Description

    template<int dim>
    -class QGaussRadauChebyshev< dim >

    Gauss-Radau-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$ with the additional constraint that a quadrature point lies at one of the two extrema of the interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-2$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. By default the quadrature is constructed with the left endpoint as quadrature node, but the quadrature node can be imposed at the right endpoint through the variable ep that can assume the values left or right.

    +class QGaussRadauChebyshev< dim >

    Gauss-Radau-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$ with the additional constraint that a quadrature point lies at one of the two extrema of the interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-2$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. By default the quadrature is constructed with the left endpoint as quadrature node, but the quadrature node can be imposed at the right endpoint through the variable ep that can assume the values left or right.

    Definition at line 516 of file quadrature_lib.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQR.html differs (JavaScript source, ASCII text, with very long lines (684)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQR.html 2023-11-25 15:25:58.043261012 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQR.html 2023-11-25 15:25:58.046594280 +0100 @@ -166,7 +166,7 @@

    Detailed Description

    template<typename VectorType>
    class QR< VectorType >

    A class to compute and store the QR factorization of a matrix represented by a set of column vectors.

    -

    The class is design to update a given (possibly empty) QR factorization of a matrix $A$ (constructed incrementally by providing its columns) due to the addition of a new column vector to $A$. This is equivalent to constructing an orthonormal basis by the Gram-Schmidt procedure. The class also provides update functionality when the first column is removed.

    +

    The class is design to update a given (possibly empty) QR factorization of a matrix $A$ (constructed incrementally by providing its columns) due to the addition of a new column vector to $A$. This is equivalent to constructing an orthonormal basis by the Gram-Schmidt procedure. The class also provides update functionality when the first column is removed.

    The VectorType template argument may either be a parallel and serial vector, and only need to have basic operations such as additions, scalar product, etc. It also needs to have a copy-constructor.

    See sections 6.5.2-6.5.3 on pp. 335-338 in

    @Book{Golub2013,
    title = {Matrix computations},
    @@ -332,28 +332,28 @@

    Remove first column and update QR factorization.

    -

    Starting from the given QR decomposition $QR= A = [a_1\,\dots a_n], \quad a_i \in {\mathbb R}^m$ we aim at computing factorization of $\tilde Q \tilde R= \tilde A = [a_2\,\dots a_n], \quad a_i \in {\mathbb
-R}^m$.

    -

    The standard approach is to partition $R$ as

    -\[
+<p>Starting from the given <a class=QR decomposition $QR= A = [a_1\,\dots a_n], \quad a_i \in {\mathbb R}^m$ we aim at computing factorization of $\tilde Q \tilde R= \tilde A = [a_2\,\dots a_n], \quad a_i \in {\mathbb
+R}^m$.

    +

    The standard approach is to partition $R$ as

    +\[
 R =
 \begin{bmatrix}
 r_{11} & w^T \\
 0      & R_{33}
 \end{bmatrix}
-\] +\]" src="form_1771.png"/>

    It then follows that

    -\[
+<picture><source srcset=\[
 Q^T \tilde A =
 \begin{bmatrix}
 0 & w^T \\
 0 & R_{33}
 \end{bmatrix}
-\] +\]" src="form_1772.png"/>

    is upper Hessenberg where unwanted sub-diagonal elements can be zeroed by a sequence of Givens rotations.

    -

    Note that $\tilde R^T \tilde R = \tilde A^T \tilde A$, where the RHS is included in $A^T A = R^T R$. Therefore $\tilde R$ can be obtained by Cholesky decomposition.

    +

    Note that $\tilde R^T \tilde R = \tilde A^T \tilde A$, where the RHS is included in $A^T A = R^T R$. Therefore $\tilde R$ can be obtained by Cholesky decomposition.

    Implements BaseQR< VectorType >.

    @@ -393,7 +393,7 @@
    -

    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    +

    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -434,7 +434,7 @@
    -

    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    +

    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.

    @@ -474,7 +474,7 @@
    -

    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    +

    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -515,7 +515,7 @@
    -

    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    +

    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.

    @@ -655,7 +655,7 @@
    -

    Solve $Rx=y$. Vectors x and y should be consistent with the current size of the subspace. If transpose is true, $R^Tx=y$ is solved instead.

    +

    Solve $Rx=y$. Vectors x and y should be consistent with the current size of the subspace. If transpose is true, $R^Tx=y$ is solved instead.

    @@ -723,7 +723,7 @@
    -

    Compute $y=Hx$ where $H$ is the matrix formed by the column vectors stored by this object.

    +

    Compute $y=Hx$ where $H$ is the matrix formed by the column vectors stored by this object.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQTelles.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (981)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQTelles.html 2023-11-25 15:25:58.063260605 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQTelles.html 2023-11-25 15:25:58.063260605 +0100 @@ -222,7 +222,7 @@ \end{align*}" src="form_741.png"/>

    Since the library assumes $[0,1]$ as reference interval, we will map these values on the proper reference interval in the implementation.

    -

    This variable change can be used to integrate singular integrals. One example is $f(x)/|x-x_0|$ on the reference interval $[0,1]$, where $x_0$ is given at construction time, and is the location of the singularity $x_0$, and $f(x)$ is a smooth non singular function.

    +

    This variable change can be used to integrate singular integrals. One example is $f(x)/|x-x_0|$ on the reference interval $[0,1]$, where $x_0$ is given at construction time, and is the location of the singularity $x_0$, and $f(x)$ is a smooth non singular function.

    Singular quadrature formula are rather expensive, nevertheless Telles' quadrature formula are much easier to compute with respect to other singular integration techniques as Lachat-Watson.

    We have implemented the case for $dim = 1$. When we deal the case $dim >1$ we have computed the quadrature formula has a tensorial product of one dimensional Telles' quadrature formulas considering the different components of the singularity.

    The weights and functions for Gauss Legendre formula have been tabulated up to order 12.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQWitherdenVincentSimplex.html differs (JavaScript source, ASCII text, with very long lines (1196)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQWitherdenVincentSimplex.html 2023-11-25 15:25:58.076593670 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classQWitherdenVincentSimplex.html 2023-11-25 15:25:58.079926935 +0100 @@ -199,7 +199,7 @@

    Detailed Description

    template<int dim>
    class QWitherdenVincentSimplex< dim >

    Witherden-Vincent rules for simplex entities.

    -

    Like QGauss, users should specify a number n_points_1d as an indication of what polynomial degree to be integrated exactly (e.g., for $n$ points, the rule can integrate polynomials of degree $2 n - 1$ exactly). Additionally, since these rules were derived for simplices, there are also even-ordered rules (i.e., they integrate polynomials of degree $2 n$) available which do not have analogous 1d rules.

    +

    Like QGauss, users should specify a number n_points_1d as an indication of what polynomial degree to be integrated exactly (e.g., for $n$ points, the rule can integrate polynomials of degree $2 n - 1$ exactly). Additionally, since these rules were derived for simplices, there are also even-ordered rules (i.e., they integrate polynomials of degree $2 n$) available which do not have analogous 1d rules.

    The given value for n_points_1d = 1, 2, 3, 4, 5, 6, 7 (where the last two are only implemented in 2d) results in the following number of quadrature points in 2d and 3d:

    -

    Compute the value of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    +

    Compute the value of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    Definition at line 2371 of file reference_cell.h.

    @@ -529,7 +529,7 @@
    -

    Compute the gradient of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    +

    Compute the gradient of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    Definition at line 2460 of file reference_cell.h.

    @@ -570,7 +570,7 @@
    -

    Return a default linear mapping matching the current reference cell. If this reference cell is a hypercube, then the returned mapping is a MappingQ1; otherwise, it is an object of type MappingFE initialized with FE_SimplexP (if the reference cell is a triangle or tetrahedron), with FE_PyramidP (if the reference cell is a pyramid), or with FE_WedgeP (if the reference cell is a wedge). In other words, the term "linear" in the name of the function has to be understood as $d$-linear (i.e., bilinear or trilinear) for some of the coordinate directions.

    +

    Return a default linear mapping matching the current reference cell. If this reference cell is a hypercube, then the returned mapping is a MappingQ1; otherwise, it is an object of type MappingFE initialized with FE_SimplexP (if the reference cell is a triangle or tetrahedron), with FE_PyramidP (if the reference cell is a pyramid), or with FE_WedgeP (if the reference cell is a wedge). In other words, the term "linear" in the name of the function has to be understood as $d$-linear (i.e., bilinear or trilinear) for some of the coordinate directions.

    Definition at line 149 of file reference_cell.cc.

    @@ -916,7 +916,7 @@
    -

    Return the reference-cell type of face face_no of the current object. For example, if the current object is ReferenceCells::Tetrahedron, then face_no must be between in the interval $[0,4)$ and the function will always return ReferenceCells::Triangle. If the current object is ReferenceCells::Hexahedron, then face_no must be between in the interval $[0,6)$ and the function will always return ReferenceCells::Quadrilateral. For wedges and pyramids, the returned object may be either ReferenceCells::Triangle or ReferenceCells::Quadrilateral, depending on the given index.

    +

    Return the reference-cell type of face face_no of the current object. For example, if the current object is ReferenceCells::Tetrahedron, then face_no must be between in the interval $[0,4)$ and the function will always return ReferenceCells::Triangle. If the current object is ReferenceCells::Hexahedron, then face_no must be between in the interval $[0,6)$ and the function will always return ReferenceCells::Quadrilateral. For wedges and pyramids, the returned object may be either ReferenceCells::Triangle or ReferenceCells::Quadrilateral, depending on the given index.

    Definition at line 1441 of file reference_cell.h.

    @@ -1412,7 +1412,7 @@
    -

    Return the $d$-dimensional volume of the reference cell that corresponds to the current object, where $d$ is the dimension of the space it lives in. For example, since the quadrilateral reference cell is $[0,1]^2$, its volume is one, whereas the volume of the reference triangle is 0.5 because it occupies the area $\{0 \le x,y \le 1, x+y\le 1\}$.

    +

    Return the $d$-dimensional volume of the reference cell that corresponds to the current object, where $d$ is the dimension of the space it lives in. For example, since the quadrilateral reference cell is $[0,1]^2$, its volume is one, whereas the volume of the reference triangle is 0.5 because it occupies the area $\{0 \le x,y \le 1, x+y\le 1\}$.

    For ReferenceCells::Vertex, the reference cell is a zero-dimensional point in a zero-dimensional space. As a consequence, one cannot meaningfully define a volume for it. The function returns one for this case, because this makes it possible to define useful quadrature rules based on the center of a reference cell and its volume.

    Definition at line 2494 of file reference_cell.h.

    @@ -1441,9 +1441,9 @@

    Return the barycenter (i.e., the center of mass) of the reference cell that corresponds to the current object. The function is not called center() because one can define the center of an object in a number of different ways whereas the barycenter of a reference cell $K$ is unambiguously defined as

    -\[
+<picture><source srcset=\[
   \mathbf x_K = \frac{1}{V} \int_K \mathbf x \; dx
-\] +\]" src="form_1470.png"/>

    where $V$ is the volume of the reference cell (see also the volume() function).

    @@ -1485,7 +1485,7 @@
    -

    Return true if the given point is inside the reference cell of the present space dimension up to some tolerance. This function accepts an additional parameter (which defaults to zero) which specifies by how much the point position may actually be outside the true reference cell. This is useful because in practice we may often not be able to compute the coordinates of a point in reference coordinates exactly, but only up to numerical roundoff. For example, strictly speaking one would expect that for points on the boundary of the reference cell, the function would return true if the tolerance was zero. But in practice, this may or may not actually be true; for example, the point $(1/3, 2/3)$ is on the boundary of the reference triangle because $1/3+2/3 \le 1$, but since neither of its coordinates are exactly representable in floating point arithmetic, the floating point representations of $1/3$ and $2/3$ may or may not add up to anything that is less than or equal to one.

    +

    Return true if the given point is inside the reference cell of the present space dimension up to some tolerance. This function accepts an additional parameter (which defaults to zero) which specifies by how much the point position may actually be outside the true reference cell. This is useful because in practice we may often not be able to compute the coordinates of a point in reference coordinates exactly, but only up to numerical roundoff. For example, strictly speaking one would expect that for points on the boundary of the reference cell, the function would return true if the tolerance was zero. But in practice, this may or may not actually be true; for example, the point $(1/3, 2/3)$ is on the boundary of the reference triangle because $1/3+2/3 \le 1$, but since neither of its coordinates are exactly representable in floating point arithmetic, the floating point representations of $1/3$ and $2/3$ may or may not add up to anything that is less than or equal to one.

    The tolerance parameter may be less than zero, indicating that the point should be safely inside the cell.

    Definition at line 2558 of file reference_cell.h.

    @@ -1547,8 +1547,8 @@
    -

    Return $i$-th unit tangential vector of a face of the reference cell. The vectors are arranged such that the cross product between the two vectors returns the unit normal vector.

    -
    Precondition
    $i$ must be between zero and dim-1.
    +

    Return $i$-th unit tangential vector of a face of the reference cell. The vectors are arranged such that the cross product between the two vectors returns the unit normal vector.

    +
    Precondition
    $i$ must be between zero and dim-1.

    Definition at line 2667 of file reference_cell.h.

    @@ -1943,7 +1943,7 @@
    -

    Given a set of node indices of the form $(i)$ or $(i,j)$ or $(i,j,k)$ (depending on whether the reference cell is in 1d, 2d, or 3d), return the index the VTK format uses for this node for cells that are subdivided as many times in each of the coordinate directions as described by the second argument. For a uniformly subdivided cell, the second argument is an array whose elements will all be equal.

    +

    Given a set of node indices of the form $(i)$ or $(i,j)$ or $(i,j,k)$ (depending on whether the reference cell is in 1d, 2d, or 3d), return the index the VTK format uses for this node for cells that are subdivided as many times in each of the coordinate directions as described by the second argument. For a uniformly subdivided cell, the second argument is an array whose elements will all be equal.

    The last argument, legacy_format, indicates whether to use the old, VTK legacy format (when true) or the new, VTU format (when false).

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classRol_1_1VectorAdaptor.html differs (JavaScript source, ASCII text, with very long lines (688)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classRol_1_1VectorAdaptor.html 2023-11-25 15:25:58.136592448 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classRol_1_1VectorAdaptor.html 2023-11-25 15:25:58.136592448 +0100 @@ -500,7 +500,7 @@
    -

    Return the $L^{2}$ norm of the wrapped vector.

    +

    Return the $L^{2}$ norm of the wrapped vector.

    The returned type is of VectorAdaptor::value_type so as to maintain consistency with ROL::Vector<VectorAdaptor::value_type> and more importantly to not to create an overloaded version namely, VectorAdaptor::real_type norm() const; if real_type and value_type are not of the same type.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html differs (JavaScript source, ASCII text, with very long lines (772)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html 2023-11-25 15:25:58.149925508 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html 2023-11-25 15:25:58.149925508 +0100 @@ -310,9 +310,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 735 of file slepc_solver.h.

    @@ -380,9 +380,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 773 of file slepc_solver.h.

    @@ -664,8 +664,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 260 of file slepc_solver.cc.

    @@ -719,8 +719,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 273 of file slepc_solver.cc.

    @@ -785,8 +785,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 87 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html differs (JavaScript source, ASCII text, with very long lines (772)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 2023-11-25 15:25:58.166591838 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 2023-11-25 15:25:58.166591838 +0100 @@ -321,9 +321,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 735 of file slepc_solver.h.

    @@ -383,9 +383,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 773 of file slepc_solver.h.

    @@ -611,8 +611,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 260 of file slepc_solver.cc.

    @@ -666,8 +666,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 273 of file slepc_solver.cc.

    @@ -732,8 +732,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 87 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html differs (JavaScript source, ASCII text, with very long lines (772)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html 2023-11-25 15:25:58.179924898 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html 2023-11-25 15:25:58.179924898 +0100 @@ -310,9 +310,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 735 of file slepc_solver.h.

    @@ -380,9 +380,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 773 of file slepc_solver.h.

    @@ -664,8 +664,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 260 of file slepc_solver.cc.

    @@ -719,8 +719,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 273 of file slepc_solver.cc.

    @@ -785,8 +785,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 87 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html differs (JavaScript source, ASCII text, with very long lines (772)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 2023-11-25 15:25:58.196591227 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 2023-11-25 15:25:58.196591227 +0100 @@ -310,9 +310,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 735 of file slepc_solver.h.

    @@ -380,9 +380,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 773 of file slepc_solver.h.

    @@ -664,8 +664,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 260 of file slepc_solver.cc.

    @@ -719,8 +719,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 273 of file slepc_solver.cc.

    @@ -785,8 +785,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 87 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html differs (JavaScript source, ASCII text, with very long lines (772)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 2023-11-25 15:25:58.209924287 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 2023-11-25 15:25:58.213257555 +0100 @@ -310,9 +310,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 735 of file slepc_solver.h.

    @@ -380,9 +380,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 773 of file slepc_solver.h.

    @@ -664,8 +664,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 260 of file slepc_solver.cc.

    @@ -719,8 +719,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 273 of file slepc_solver.cc.

    @@ -785,8 +785,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 87 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html differs (JavaScript source, ASCII text, with very long lines (772)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html 2023-11-25 15:25:58.226590616 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html 2023-11-25 15:25:58.226590616 +0100 @@ -310,9 +310,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 735 of file slepc_solver.h.

    @@ -380,9 +380,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 773 of file slepc_solver.h.

    @@ -664,8 +664,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 260 of file slepc_solver.cc.

    @@ -719,8 +719,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 273 of file slepc_solver.cc.

    @@ -785,8 +785,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 87 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html differs (JavaScript source, ASCII text, with very long lines (772)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 2023-11-25 15:25:58.239923676 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 2023-11-25 15:25:58.243256944 +0100 @@ -310,9 +310,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 735 of file slepc_solver.h.

    @@ -380,9 +380,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 773 of file slepc_solver.h.

    @@ -664,8 +664,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 260 of file slepc_solver.cc.

    @@ -719,8 +719,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 273 of file slepc_solver.cc.

    @@ -785,8 +785,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 87 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html differs (JavaScript source, ASCII text, with very long lines (772)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html 2023-11-25 15:25:58.256590004 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html 2023-11-25 15:25:58.256590004 +0100 @@ -310,9 +310,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 735 of file slepc_solver.h.

    @@ -380,9 +380,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 773 of file slepc_solver.h.

    @@ -664,8 +664,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 260 of file slepc_solver.cc.

    @@ -719,8 +719,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 273 of file slepc_solver.cc.

    @@ -785,8 +785,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 87 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1ARKode.html differs (JavaScript source, ASCII text, with very long lines (1421)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 2023-11-25 15:25:58.279922865 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 2023-11-25 15:25:58.279922865 +0100 @@ -192,85 +192,85 @@

    The class ARKode is a wrapper to SUNDIALS variable-step, embedded, additive Runge-Kutta solver which is a general purpose solver for systems of ordinary differential equations characterized by the presence of both fast and slow dynamics.

    Fast dynamics are treated implicitly, and slow dynamics are treated explicitly, using nested families of implicit and explicit Runge-Kutta solvers.

    Citing directly from ARKode documentation:

    -

    ARKode solves ODE initial value problems (IVPs) in $R^N$. These problems should be posed in explicit form as

    +

    ARKode solves ODE initial value problems (IVPs) in $R^N$. These problems should be posed in explicit form as

    -\[
+<picture><source srcset=\[
     M\dot y = f_E(t, y) + f_I (t, y), \qquad y(t_0) = y_0.
-  \] + \]" src="form_2555.png"/>

    -

    Here, $t$ is the independent variable (e.g. time), and the dependent variables are given by $y \in R^N$, and we use notation $\dot y$ to denote $dy/dt$. $M$ is a user-supplied nonsingular operator from $R^N \to R^N$. This operator may depend on $t$ but not on $y$.

    -

    For standard systems of ordinary differential equations and for problems arising from the spatial semi-discretization of partial differential equations using finite difference or finite volume methods, $M$ is typically the identity matrix, $I$. For PDEs using a finite-element spatial semi-discretization $M$ is typically a well-conditioned mass matrix.

    +

    Here, $t$ is the independent variable (e.g. time), and the dependent variables are given by $y \in R^N$, and we use notation $\dot y$ to denote $dy/dt$. $M$ is a user-supplied nonsingular operator from $R^N \to R^N$. This operator may depend on $t$ but not on $y$.

    +

    For standard systems of ordinary differential equations and for problems arising from the spatial semi-discretization of partial differential equations using finite difference or finite volume methods, $M$ is typically the identity matrix, $I$. For PDEs using a finite-element spatial semi-discretization $M$ is typically a well-conditioned mass matrix.

    The two right-hand side functions may be described as:

    -

    ARKode may be used to solve stiff, nonstiff and multi-rate problems. Roughly speaking, stiffness is characterized by the presence of at least one rapidly damped mode, whose time constant is small compared to the time scale of the solution itself. In the implicit/explicit (ImEx) splitting above, these stiff components should be included in the right-hand side function $f_I (t, y)$.

    -

    For multi-rate problems, a user should provide both of the functions $f_E$ and $f_I$ that define the IVP system.

    -

    For nonstiff problems, only $f_E$ should be provided, and $f_I$ is assumed to be zero, i.e. the system reduces to the non-split IVP:

    +

    ARKode may be used to solve stiff, nonstiff and multi-rate problems. Roughly speaking, stiffness is characterized by the presence of at least one rapidly damped mode, whose time constant is small compared to the time scale of the solution itself. In the implicit/explicit (ImEx) splitting above, these stiff components should be included in the right-hand side function $f_I (t, y)$.

    +

    For multi-rate problems, a user should provide both of the functions $f_E$ and $f_I$ that define the IVP system.

    +

    For nonstiff problems, only $f_E$ should be provided, and $f_I$ is assumed to be zero, i.e. the system reduces to the non-split IVP:

    -\[
+<picture><source srcset=\[
     M\dot y = f_E(t, y), \qquad y(t_0) = y_0.
-  \] + \]" src="form_2565.png"/>

    -

    In this scenario, the ARK methods reduce to classical explicit Runge-Kutta methods (ERK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5, 6, 8\}$, with embeddings of orders $p = \{1,
-  2, 3, 4, 5, 7\}$. These default to the Heun-Euler-2-1-2, Bogacki-Shampine-4-2-3, Zonneveld-5-3-4, Cash-Karp-6-4-5, Verner-8-5-6 and Fehlberg-13-7-8 methods, respectively.

    -

    Finally, for stiff (linear or nonlinear) problems the user may provide only $f_I$, implying that $f_E = 0$, so that the system reduces to the non-split IVP

    +

    In this scenario, the ARK methods reduce to classical explicit Runge-Kutta methods (ERK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5, 6, 8\}$, with embeddings of orders $p = \{1,
+  2, 3, 4, 5, 7\}$. These default to the Heun-Euler-2-1-2, Bogacki-Shampine-4-2-3, Zonneveld-5-3-4, Cash-Karp-6-4-5, Verner-8-5-6 and Fehlberg-13-7-8 methods, respectively.

    +

    Finally, for stiff (linear or nonlinear) problems the user may provide only $f_I$, implying that $f_E = 0$, so that the system reduces to the non-split IVP

    -\[
+<picture><source srcset=\[
     M\dot y = f_I(t, y), \qquad y(t_0) = y_0.
-  \] + \]" src="form_2569.png"/>

    -

    Similarly to ERK methods, in this scenario the ARK methods reduce to classical diagonally-implicit Runge-Kutta methods (DIRK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5\}$, with embeddings of orders $p = \{1, 2, 3, 4\}$. These default to the SDIRK-2-1-2, ARK-4-2-3 (implicit), SDIRK-5-3-4 and ARK-8-4-5 (implicit) methods, respectively.

    +

    Similarly to ERK methods, in this scenario the ARK methods reduce to classical diagonally-implicit Runge-Kutta methods (DIRK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5\}$, with embeddings of orders $p = \{1, 2, 3, 4\}$. These default to the SDIRK-2-1-2, ARK-4-2-3 (implicit), SDIRK-5-3-4 and ARK-8-4-5 (implicit) methods, respectively.

    For both DIRK and ARK methods, an implicit system of the form

    -\[
+<picture><source srcset=\[
    G(z_i) \dealcoloneq M z_i - h_n A^I_{i,i} f_I (t^I_{n,i}, z_i) - a_i = 0
-  \] + \]" src="form_2572.png"/>

    -

    must be solved for each stage $z_i , i = 1, \ldots, s$, where we have the data

    -\[
+<p> must be solved for each stage <picture><source srcset=$z_i , i = 1, \ldots, s$, where we have the data

    +\[
    a_i \dealcoloneq
    M y_{n-1} + h_n \sum_{j=1}^{i-1} [ A^E_{i,j} f_E(t^E_{n,j}, z_j)
    + A^I_{i,j} f_I (t^I_{n,j}, z_j)]
-  \] + \]" src="form_2574.png"/>

    for the ARK methods, or

    -\[
+<picture><source srcset=\[
    a_i \dealcoloneq
    M y_{n-1} + h_n \sum_{j=1}^{i-1} A^I_{i,j} f_I (t^I_{n,j}, z_j)
-  \] + \]" src="form_2575.png"/>

    -

    for the DIRK methods. Here $A^I_{i,j}$ and $A^E_{i,j}$ are the Butcher's tables for the chosen solver.

    -

    If $f_I(t,y)$ depends nonlinearly on $y$ then the systems above correspond to a nonlinear system of equations; if $f_I (t, y)$ depends linearly on $y$ then this is a linear system of equations. By specifying the flag implicit_function_is_linear, ARKode takes some shortcuts that allow a faster solution process.

    +

    for the DIRK methods. Here $A^I_{i,j}$ and $A^E_{i,j}$ are the Butcher's tables for the chosen solver.

    +

    If $f_I(t,y)$ depends nonlinearly on $y$ then the systems above correspond to a nonlinear system of equations; if $f_I (t, y)$ depends linearly on $y$ then this is a linear system of equations. By specifying the flag implicit_function_is_linear, ARKode takes some shortcuts that allow a faster solution process.

    For systems of either type, ARKode allows a choice of solution strategy. The default solver choice is a variant of Newton's method,

    -\[
+<picture><source srcset=\[
    z_i^{m+1} = z_i^m +\delta^{m+1},
-  \] + \]" src="form_2579.png"/>

    -

    where $m$ is the Newton step index, and the Newton update $\delta^{m+1}$ requires the solution of the linear Newton system

    -\[
+<p> where <picture><source srcset=$m$ is the Newton step index, and the Newton update $\delta^{m+1}$ requires the solution of the linear Newton system

    +\[
    N(z_i^m) \delta^{m+1} = -G(z_i^m),
-  \] + \]" src="form_2581.png"/>

    where

    -\[
+<picture><source srcset=\[
   N \dealcoloneq M - \gamma J, \quad J
   \dealcoloneq \frac{\partial f_I}{\partial y},
   \qquad \gamma\dealcoloneq h_n A^I_{i,i}.
-  \] + \]" src="form_2582.png"/>

    -

    As an alternate to Newton's method, ARKode may solve for each stage $z_i ,i
-  = 1, \ldots , s$ using an Anderson-accelerated fixed point iteration

    -\[
+<p>As an alternate to Newton's method, <a class=ARKode may solve for each stage $z_i ,i
+  = 1, \ldots , s$ using an Anderson-accelerated fixed point iteration

    +\[
   z_i^{m+1} = g(z_i^{m}), m=0,1,\ldots.
-  \] + \]" src="form_2584.png"/>

    Unlike with Newton's method, this option does not require the solution of a linear system at each iteration, instead opting for solution of a low-dimensional least-squares solution to construct the nonlinear update.

    -

    Finally, if the user specifies implicit_function_is_linear, i.e., $f_I(t, y)$ depends linearly on $y$, and if the Newton-based nonlinear solver is chosen, then the system will be solved using only a single Newton iteration. Notice that in order for the Newton solver to be used, then jacobian_times_vector() should be supplied. If it is not supplied then only the fixed-point iteration will be supported, and the implicit_function_is_linear setting is ignored.

    -

    The optimal solver (Newton vs fixed-point) is highly problem-dependent. Since fixed-point solvers do not require the solution of any linear systems, each iteration may be significantly less costly than their Newton counterparts. However, this can come at the cost of slower convergence (or even divergence) in comparison with Newton-like methods. These fixed-point solvers do allow for user specification of the Anderson-accelerated subspace size, $m_k$. While the required amount of solver memory grows proportionately to $m_k N$, larger values of $m_k$ may result in faster convergence.

    -

    This improvement may be significant even for "small" values, e.g. $1 \leq
-  m_k \leq 5$, and convergence may not improve (or even deteriorate) for larger values of $m_k$. While ARKode uses a Newton-based iteration as its default solver due to its increased robustness on very stiff problems, it is highly recommended that users also consider the fixed-point solver for their cases when attempting a new problem.

    -

    For either the Newton or fixed-point solvers, it is well-known that both the efficiency and robustness of the algorithm intimately depends on the choice of a good initial guess. In ARKode, the initial guess for either nonlinear solution method is a predicted value $z_i(0)$ that is computed explicitly from the previously-computed data (e.g. $y_{n-2}, y_{n-1}$, and $z_j$ where $j < i$). Additional information on the specific predictor algorithms implemented in ARKode is provided in ARKode documentation.

    +

    Finally, if the user specifies implicit_function_is_linear, i.e., $f_I(t, y)$ depends linearly on $y$, and if the Newton-based nonlinear solver is chosen, then the system will be solved using only a single Newton iteration. Notice that in order for the Newton solver to be used, then jacobian_times_vector() should be supplied. If it is not supplied then only the fixed-point iteration will be supported, and the implicit_function_is_linear setting is ignored.

    +

    The optimal solver (Newton vs fixed-point) is highly problem-dependent. Since fixed-point solvers do not require the solution of any linear systems, each iteration may be significantly less costly than their Newton counterparts. However, this can come at the cost of slower convergence (or even divergence) in comparison with Newton-like methods. These fixed-point solvers do allow for user specification of the Anderson-accelerated subspace size, $m_k$. While the required amount of solver memory grows proportionately to $m_k N$, larger values of $m_k$ may result in faster convergence.

    +

    This improvement may be significant even for "small" values, e.g. $1 \leq
+  m_k \leq 5$, and convergence may not improve (or even deteriorate) for larger values of $m_k$. While ARKode uses a Newton-based iteration as its default solver due to its increased robustness on very stiff problems, it is highly recommended that users also consider the fixed-point solver for their cases when attempting a new problem.

    +

    For either the Newton or fixed-point solvers, it is well-known that both the efficiency and robustness of the algorithm intimately depends on the choice of a good initial guess. In ARKode, the initial guess for either nonlinear solution method is a predicted value $z_i(0)$ that is computed explicitly from the previously-computed data (e.g. $y_{n-2}, y_{n-1}$, and $z_j$ where $j < i$). Additional information on the specific predictor algorithms implemented in ARKode is provided in ARKode documentation.

    The user has to provide the implementation of at least one (or both) of the following std::functions:

    To provide a simple example, consider the harmonic oscillator problem:

    -\[
+<picture><source srcset=\[
   \begin{split}
     u'' & = -k^2 u \\
     u (0) & = 0 \\
     u'(0) & = k
   \end{split}
-  \] + \]" src="form_2592.png"/>

    We write it in terms of a first order ode:

    -\[
+<picture><source srcset=\[
   \begin{matrix}
     y_0' & =  y_1 \\
     y_1' & = - k^2 y_0
   \end{matrix}
-  \] + \]" src="form_2593.png"/>

    -

    That is $y' = A y$ where

    -\[
+<p>That is <picture><source srcset=$y' = A y$ where

    +\[
   A \dealcoloneq
   \begin{pmatrix}
   0 & 1 \\
   -k^2 &0
   \end{pmatrix}
-  \] + \]" src="form_2595.png"/>

    -

    and $y(0)=(0, k)^T$.

    -

    The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t) = k \cos(k
-*t)$, $y_1'(t) = -k^2 \sin(k t)$.

    +

    and $y(0)=(0, k)^T$.

    +

    The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t) = k \cos(k
+*t)$, $y_1'(t) = -k^2 \sin(k t)$.

    A minimal implementation, using only explicit RK methods, is given by the following code snippet:

    using VectorType = Vector<double>;
    @@ -785,8 +785,8 @@
    -

    A function object that users may supply and that is intended to compute the explicit part of the IVP right hand side. Sets $explicit_f = f_E(t,
-y)$.

    +

    A function object that users may supply and that is intended to compute the explicit part of the IVP right hand side. Sets $explicit_f = f_E(t,
+y)$.

    At least one of explicit_function() or implicit_function() must be provided. According to which one is provided, explicit, implicit, or mixed RK methods are used.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, ARKode can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -807,8 +807,8 @@
    -

    A function object that users may supply and that is intended to compute the implicit part of the IVP right hand side. Sets $implicit_f = f_I(t,
-y)$.

    +

    A function object that users may supply and that is intended to compute the implicit part of the IVP right hand side. Sets $implicit_f = f_I(t,
+y)$.

    At least one of explicit_function() or implicit_function() must be provided. According to which one is provided, explicit, implicit, or mixed RK methods are used.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, ARKode can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -830,7 +830,7 @@ /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1IDA.html differs (JavaScript source, ASCII text, with very long lines (1392)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1IDA.html 2023-11-25 15:25:58.296589190 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1IDA.html 2023-11-25 15:25:58.296589190 +0100 @@ -183,69 +183,69 @@

    Citing from the SUNDIALS documentation:

    Consider a system of Differential-Algebraic Equations written in the general form

    -\[
+<picture><source srcset=\[
    \begin{cases}
        F(t,y,\dot y) = 0\, , \\
        y(t_0) = y_0\, , \\
        \dot y (t_0) = \dot y_0\, .
    \end{cases}
- \] + \]" src="form_2611.png"/>

    -

    where $y,\dot y$ are vectors in $\mathbb{R}^n$, $t$ is often the time (but can also be a parametric quantity), and $F:\mathbb{R}\times\mathbb{R}^n\times \mathbb{R}^n\rightarrow\mathbb{R}^n$. Such problem is solved using Newton iteration augmented with a line search global strategy. The integration method used in IDA is the variable-order, variable-coefficient BDF (Backward Differentiation Formula), in fixed-leading-coefficient. The method order ranges from 1 to 5, with the BDF of order $q$ given by the multistep formula

    +

    where $y,\dot y$ are vectors in $\mathbb{R}^n$, $t$ is often the time (but can also be a parametric quantity), and $F:\mathbb{R}\times\mathbb{R}^n\times \mathbb{R}^n\rightarrow\mathbb{R}^n$. Such problem is solved using Newton iteration augmented with a line search global strategy. The integration method used in IDA is the variable-order, variable-coefficient BDF (Backward Differentiation Formula), in fixed-leading-coefficient. The method order ranges from 1 to 5, with the BDF of order $q$ given by the multistep formula

    -\[
+<picture><source srcset=\[
    \sum_{i=0}^q \alpha_{n,i}\,y_{n-i}=h_n\,\dot y_n\, ,
    \label{eq:bdf}
- \] + \]" src="form_2615.png"/>

    -

    where $y_n$ and $\dot y_n$ are the computed approximations of $y(t_n)$ and $\dot y(t_n)$, respectively, and the step size is $h_n=t_n-t_{n-1}$. The coefficients $\alpha_{n,i}$ are uniquely determined by the order $q$, and the history of the step sizes. The application of the BDF method to the DAE system results in a nonlinear algebraic system to be solved at each time step:

    +

    where $y_n$ and $\dot y_n$ are the computed approximations of $y(t_n)$ and $\dot y(t_n)$, respectively, and the step size is $h_n=t_n-t_{n-1}$. The coefficients $\alpha_{n,i}$ are uniquely determined by the order $q$, and the history of the step sizes. The application of the BDF method to the DAE system results in a nonlinear algebraic system to be solved at each time step:

    -\[
+<picture><source srcset=\[
    G(y_n)\equiv F\left(t_n,y_n,\dfrac{1}{h_n}\sum_{i=0}^q
   \alpha_{n,i}\,y_{n-i}\right)=0\, .
- \] + \]" src="form_2622.png"/>

    The Newton method leads to a linear system of the form

    -\[
+<picture><source srcset=\[
    J[y_{n(m+1)}-y_{n(m)}]=-G(y_{n(m)})\, ,
- \] + \]" src="form_2623.png"/>

    -

    where $y_{n(m)}$ is the $m$-th approximation to $y_n$, and $J$ is the approximation of the system Jacobian

    +

    where $y_{n(m)}$ is the $m$-th approximation to $y_n$, and $J$ is the approximation of the system Jacobian

    -\[
+<picture><source srcset=\[
    J=\dfrac{\partial G}{\partial y} = \dfrac{\partial F}{\partial y} +
   \alpha \dfrac{\partial F}{\partial \dot y}\, ,
- \] + \]" src="form_2625.png"/>

    -

    and $\alpha = \alpha_{n,0}/h_n$. It is worth mentioning that the scalar $\alpha$ changes whenever the step size or method order changes.

    +

    and $\alpha = \alpha_{n,0}/h_n$. It is worth mentioning that the scalar $\alpha$ changes whenever the step size or method order changes.

    To provide a simple example, consider the following harmonic oscillator problem:

    -\[ \begin{split}
+<picture><source srcset=\[ \begin{split}
    u'' & = -k^2 u \\
    u (0) & = 0 \\
    u'(0) & = k
  \end{split}
- \] + \]" src="form_2627.png"/>

    We write it in terms of a first order ode:

    -\[
+<picture><source srcset=\[
  \begin{matrix}
    y_0' & -y_1      & = 0 \\
    y_1' & + k^2 y_0 & = 0
  \end{matrix}
- \] + \]" src="form_2628.png"/>

    -

    That is $F(y', y, t) = y' + A y = 0 $ where A =

    -\[
+<p>That is <picture><source srcset=$F(y', y, t) = y' + A y = 0 $ where A =

    +\[
  \begin{pmatrix}
  0 & -1 \\
  k^2 &0
  \end{pmatrix}
- \] + \]" src="form_2630.png"/>

    -

    and $y(0)=(0, k)$, $y'(0) = (k, 0)$.

    -

    The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t)
- = k \cos(k t)$, $y_1'(t) = -k^2 \sin(k t)$.

    -

    The Jacobian to assemble is the following: $J = \alpha I + A$.

    +

    and $y(0)=(0, k)$, $y'(0) = (k, 0)$.

    +

    The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t)
+ = k \cos(k t)$, $y_1'(t) = -k^2 \sin(k t)$.

    +

    The Jacobian to assemble is the following: $J = \alpha I + A$.

    This is achieved by the following snippet of code:

    using VectorType = Vector<double>;
    VectorType y(2);
    @@ -332,8 +332,8 @@
  • use_y_diff: compute the algebraic components of y and differential components of y_dot, given the differential components of y. This option requires that the user specifies differential and algebraic components in the function get_differential_components.
  • use_y_dot: compute all components of y, given y_dot.
  • -

    By default, this class assumes that all components are differential, and that you want to solve a standard ode. In this case, the initial component type is set to use_y_diff, so that the y_dot at time t=initial_time is computed by solving the nonlinear problem $F(y_dot,
-y(t0), t0) = 0$ in the variable y_dot.

    +

    By default, this class assumes that all components are differential, and that you want to solve a standard ode. In this case, the initial component type is set to use_y_diff, so that the y_dot at time t=initial_time is computed by solving the nonlinear problem $F(y_dot,
+y(t0), t0) = 0$ in the variable y_dot.

    Notice that a Newton solver is used for this computation. The Newton solver parameters can be tweaked by acting on ic_alpha and ic_max_iter.

    If you reset the solver at some point, you may want to select a different computation for the initial conditions after reset. Say, for example, that you have refined a grid, and after transferring the solution to the new grid, the initial conditions are no longer consistent. Then you can choose how these are made consistent, using the same three options that you used for the initial conditions in reset_type.

    Parameters
    @@ -631,7 +631,7 @@
    -

    Compute residual. Return $F(t, y, \dot y)$.

    +

    Compute residual. Return $F(t, y, \dot y)$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, IDA can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 662 of file ida.h.

    @@ -653,13 +653,13 @@

    Compute Jacobian. This function is called by IDA any time a Jacobian update is required. The user should compute the Jacobian (or update all the variables that allow the application of the Jacobian). This function is called by IDA once, before any call to solve_jacobian_system() or solve_with_jacobian().

    The Jacobian $J$ should be a (possibly inexact) computation of

    -\[
+<picture><source srcset=\[
   J=\dfrac{\partial G}{\partial y} = \dfrac{\partial F}{\partial y} +
  \alpha \dfrac{\partial F}{\partial \dot y}.
-\] +\]" src="form_2636.png"/>

    If the user uses a matrix based computation of the Jacobian, then this is the right place where an assembly routine should be called to assemble both a matrix and a preconditioner for the Jacobian system. Subsequent calls (possibly more than one) to solve_jacobian_system() or solve_with_jacobian() can assume that this function has been called at least once.

    -

    Notice that no assumption is made by this interface on what the user should do in this function. IDA only assumes that after a call to setup_jacobian() it is possible to call solve_jacobian_system() or solve_with_jacobian() to obtain a solution $x$ to the system $J x = b$.

    +

    Notice that no assumption is made by this interface on what the user should do in this function. IDA only assumes that after a call to setup_jacobian() it is possible to call solve_jacobian_system() or solve_with_jacobian() to obtain a solution $x$ to the system $J x = b$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, IDA can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 701 of file ida.h.

    @@ -681,12 +681,12 @@

    Solve the Jacobian linear system. This function will be called by IDA (possibly several times) after setup_jacobian() has been called at least once. IDA tries to do its best to call setup_jacobian() the minimum amount of times. If convergence can be achieved without updating the Jacobian, then IDA does not call setup_jacobian() again. If, on the contrary, internal IDA convergence tests fail, then IDA calls again setup_jacobian() with updated vectors and coefficients so that successive calls to solve_jacobian_systems() lead to better convergence in the Newton process.

    The jacobian $J$ should be (an approximation of) the system Jacobian

    -\[
+<picture><source srcset=\[
   J=\dfrac{\partial G}{\partial y} = \dfrac{\partial F}{\partial y} +
  \alpha \dfrac{\partial F}{\partial \dot y}.
-\] +\]" src="form_2636.png"/>

    -

    A call to this function should store in dst the result of $J^{-1}$ applied to src, i.e., J*dst = src. It is the users responsibility to set up proper solvers and preconditioners inside this function.

    +

    A call to this function should store in dst the result of $J^{-1}$ applied to src, i.e., J*dst = src. It is the users responsibility to set up proper solvers and preconditioners inside this function.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, IDA can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    Deprecated:
    Use solve_with_jacobian() instead which also uses a numerical tolerance.
    @@ -709,21 +709,21 @@

    Solve the Jacobian linear system up to a specified tolerance. This function will be called by IDA (possibly several times) after setup_jacobian() has been called at least once. IDA tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then IDA does not call setup_jacobian() again. If, on the contrary, internal IDA convergence tests fail, then IDA calls again setup_jacobian() with updated vectors and coefficients so that successive calls to solve_with_jacobian() lead to better convergence in the Newton process.

    The Jacobian $J$ should be (an approximation of) the system Jacobian

    -\[
+<picture><source srcset=\[
   J=\dfrac{\partial G}{\partial y} = \dfrac{\partial F}{\partial y} +
  \alpha \dfrac{\partial F}{\partial \dot y}.
-\] +\]" src="form_2636.png"/>

    Arguments to the function are:

    Parameters
    - +
    [in]rhsThe system right hand side to solve for.
    [out]dstThe solution of $J^{-1} * src$.
    [out]dstThe solution of $J^{-1} * src$.
    [in]toleranceThe tolerance with which to solve the linear system of equations.
    -

    A call to this function should store in dst the result of $J^{-1}$ applied to src, i.e., the solution of the linear system J*dst = src. It is the user's responsibility to set up proper solvers and preconditioners either inside this function, or already within the setup_jacobian() function. (The latter is, for example, what the step-77 program does: All expensive operations happen in setup_jacobian(), given that that function is called far less often than the current one.)

    +

    A call to this function should store in dst the result of $J^{-1}$ applied to src, i.e., the solution of the linear system J*dst = src. It is the user's responsibility to set up proper solvers and preconditioners either inside this function, or already within the setup_jacobian() function. (The latter is, for example, what the step-77 program does: All expensive operations happen in setup_jacobian(), given that that function is called far less often than the current one.)

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, IDA can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 781 of file ida.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html differs (JavaScript source, ASCII text, with very long lines (2466)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 2023-11-25 15:25:58.309922254 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 2023-11-25 15:25:58.313255519 +0100 @@ -567,8 +567,8 @@

    Type of correction for initial conditions.

    -

    If you do not provide consistent initial conditions, (i.e., conditions for which $F(y_dot(0), y(0), 0) = 0$), you can ask SUNDIALS to compute initial conditions for you by using the ic_type parameter at construction time.

    -

    Notice that you could in principle use this capabilities to solve for steady state problems by setting y_dot to zero, and asking to compute $y(0)$ that satisfies $F(0, y(0), 0) = 0$, however the nonlinear solver used inside IDA may not be robust enough for complex problems with several millions unknowns.

    +

    If you do not provide consistent initial conditions, (i.e., conditions for which $F(y_dot(0), y(0), 0) = 0$), you can ask SUNDIALS to compute initial conditions for you by using the ic_type parameter at construction time.

    +

    Notice that you could in principle use this capabilities to solve for steady state problems by setting y_dot to zero, and asking to compute $y(0)$ that satisfies $F(0, y(0), 0) = 0$, however the nonlinear solver used inside IDA may not be robust enough for complex problems with several millions unknowns.

    Definition at line 523 of file ida.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html differs (JavaScript source, ASCII text, with very long lines (2207)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 2023-11-25 15:25:58.326588579 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 2023-11-25 15:25:58.326588579 +0100 @@ -174,48 +174,48 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    class SUNDIALS::KINSOL< VectorType >

    Interface to SUNDIALS' nonlinear solver (KINSOL).

    -

    KINSOL is a solver for nonlinear algebraic systems in residual form $F(u)
-= 0$ or fixed point form $G(u) = u$, where $u$ is a vector which we will assume to be in ${\mathbb R}^n$ or ${\mathbb C}^n$, but that may also have a block structure and may be distributed in parallel computations; the functions $F$ and $G$ satisfy $F,G:{\mathbb R}^N \to{\mathbb R}^N$ or $F,G:{\mathbb C}^N \to{\mathbb C}^N$. It includes a Newton-Krylov solver as well as Picard and fixed point solvers, both of which can be accelerated with Anderson acceleration. KINSOL is based on the previous Fortran package NKSOL of Brown and Saad. An example of using KINSOL can be found in the step-77 tutorial program.

    +

    KINSOL is a solver for nonlinear algebraic systems in residual form $F(u)
+= 0$ or fixed point form $G(u) = u$, where $u$ is a vector which we will assume to be in ${\mathbb R}^n$ or ${\mathbb C}^n$, but that may also have a block structure and may be distributed in parallel computations; the functions $F$ and $G$ satisfy $F,G:{\mathbb R}^N \to{\mathbb R}^N$ or $F,G:{\mathbb C}^N \to{\mathbb C}^N$. It includes a Newton-Krylov solver as well as Picard and fixed point solvers, both of which can be accelerated with Anderson acceleration. KINSOL is based on the previous Fortran package NKSOL of Brown and Saad. An example of using KINSOL can be found in the step-77 tutorial program.

    KINSOL's Newton solver employs the inexact Newton method. As this solver is intended mainly for large systems, the user is required to provide their own solver function.

    At the highest level, KINSOL implements the following iteration scheme:

      -
    • set $u_0$ = an initial guess
    • -
    • For $n = 0, 1, 2, \ldots$ until convergence do:
        -
      • Solve $J(u_n)\delta_n = -F(u_n)$
      • -
      • Set $u_{n+1} = u_n + \lambda \delta_n, 0 < \lambda \leq 1$
      • +
      • set $u_0$ = an initial guess
      • +
      • For $n = 0, 1, 2, \ldots$ until convergence do:
          +
        • Solve $J(u_n)\delta_n = -F(u_n)$
        • +
        • Set $u_{n+1} = u_n + \lambda \delta_n, 0 < \lambda \leq 1$
        • Test for convergence
      -

      Here, $u_n$ is the $n$-th iterate to $u$, and $J(u) = \nabla_u F(u)$ is the system Jacobian. At each stage in the iteration process, a scalar multiple of the step $\delta_n$, is added to $u_n$ to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

      +

      Here, $u_n$ is the $n$-th iterate to $u$, and $J(u) = \nabla_u F(u)$ is the system Jacobian. At each stage in the iteration process, a scalar multiple of the step $\delta_n$, is added to $u_n$ to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

      Unless specified otherwise by the user, KINSOL strives to update Jacobian information as infrequently as possible to balance the high costs of matrix operations against other costs. Specifically, these updates occur when:

      • the problem is initialized,
      • -
      • $\|\lambda \delta_{n-1} \|_{D_u,\infty} \geq 1.5$ (inexact Newton only, see below for a definition of $\| \cdot \|_{D_u,\infty}$)
      • +
      • $\|\lambda \delta_{n-1} \|_{D_u,\infty} \geq 1.5$ (inexact Newton only, see below for a definition of $\| \cdot \|_{D_u,\infty}$)
      • a specified number of nonlinear iterations have passed since the last update,
      • the linear solver failed recoverably with outdated Jacobian information,
      • the global strategy failed with outdated Jacobian information, or
      • -
      • $\|\lambda \delta_{n} \|_{D_u,\infty} \leq $ tolerance with outdated Jacobian information.
      • +
      • $\|\lambda \delta_{n} \|_{D_u,\infty} \leq $ tolerance with outdated Jacobian information.

      KINSOL allows changes to the above strategy through optional solver inputs. The user can disable the initial Jacobian information evaluation or change the default value of the number of nonlinear iterations after which a Jacobian information update is enforced.

      -

      To address the case of ill-conditioned nonlinear systems, KINSOL allows prescribing scaling factors both for the solution vector and for the residual vector. For scaling to be used, the user may supply the function get_solution_scaling(), that returns values $D_u$, which are diagonal elements of the scaling matrix such that $D_u u_n$ has all components roughly the same magnitude when $u_n$ is close to a solution, and get_function_scaling(), that supply values $D_F$, which are diagonal scaling matrix elements such that $D_F F$ has all components roughly the same magnitude when $u_n$ is not too close to a solution.

      +

      To address the case of ill-conditioned nonlinear systems, KINSOL allows prescribing scaling factors both for the solution vector and for the residual vector. For scaling to be used, the user may supply the function get_solution_scaling(), that returns values $D_u$, which are diagonal elements of the scaling matrix such that $D_u u_n$ has all components roughly the same magnitude when $u_n$ is close to a solution, and get_function_scaling(), that supply values $D_F$, which are diagonal scaling matrix elements such that $D_F F$ has all components roughly the same magnitude when $u_n$ is not too close to a solution.

      When scaling values are provided for the solution vector, these values are automatically incorporated into the calculation of the perturbations used for the default difference quotient approximations for Jacobian information if the user does not supply a Jacobian solver through the solve_with_jacobian() function.

      -

      Two methods of applying a computed step $\delta_n$ to the previously computed solution vector are implemented. The first and simplest is the standard Newton strategy which applies the update with a constant $\lambda$ always set to 1. The other method is a global strategy, which attempts to use the direction implied by $\delta_n$ in the most efficient way for furthering convergence of the nonlinear problem. This technique is implemented in the second strategy, called Linesearch. This option employs both the $\alpha$ and $\beta$ conditions of the Goldstein-Armijo linesearch algorithm given in [DennisSchnabel96] , where $\lambda$ is chosen to guarantee a sufficient decrease in $F$ relative to the step length as well as a minimum step length relative to the initial rate of decrease of $F$. One property of the algorithm is that the full Newton step tends to be taken close to the solution.

      +

      Two methods of applying a computed step $\delta_n$ to the previously computed solution vector are implemented. The first and simplest is the standard Newton strategy which applies the update with a constant $\lambda$ always set to 1. The other method is a global strategy, which attempts to use the direction implied by $\delta_n$ in the most efficient way for furthering convergence of the nonlinear problem. This technique is implemented in the second strategy, called Linesearch. This option employs both the $\alpha$ and $\beta$ conditions of the Goldstein-Armijo linesearch algorithm given in [DennisSchnabel96] , where $\lambda$ is chosen to guarantee a sufficient decrease in $F$ relative to the step length as well as a minimum step length relative to the initial rate of decrease of $F$. One property of the algorithm is that the full Newton step tends to be taken close to the solution.

      The basic fixed-point iteration scheme implemented in KINSOL is given by:

        -
      • Set $u_0 =$ an initial guess
      • -
      • For $n = 0, 1, 2, \dots$ until convergence do:
          -
        • Set $u_{n+1} = G(u_n)$
        • +
        • Set $u_0 =$ an initial guess
        • +
        • For $n = 0, 1, 2, \dots$ until convergence do:
            +
          • Set $u_{n+1} = G(u_n)$
          • Test for convergence
        -

        At each stage in the iteration process, function $G$ is applied to the current iterate to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

        -

        For Picard iteration, as implemented in KINSOL, we consider a special form of the nonlinear function $F$, such that $F(u) = Lu - N(u)$, where $L$ is a constant nonsingular matrix and $N$ is (in general) nonlinear.

        -

        Then the fixed-point function $G$ is defined as $G(u) = u - L^{-1}F(u)$. Within each iteration, the Picard step is computed then added to $u_n$ to produce the new iterate. Next, the nonlinear residual function is evaluated at the new iterate, and convergence is checked. The Picard and fixed point methods can be significantly accelerated using Anderson's acceleration method.

        +

        At each stage in the iteration process, function $G$ is applied to the current iterate to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

        +

        For Picard iteration, as implemented in KINSOL, we consider a special form of the nonlinear function $F$, such that $F(u) = Lu - N(u)$, where $L$ is a constant nonsingular matrix and $N$ is (in general) nonlinear.

        +

        Then the fixed-point function $G$ is defined as $G(u) = u - L^{-1}F(u)$. Within each iteration, the Picard step is computed then added to $u_n$ to produce the new iterate. Next, the nonlinear residual function is evaluated at the new iterate, and convergence is checked. The Picard and fixed point methods can be significantly accelerated using Anderson's acceleration method.

        The user has to provide the implementation of the following std::functions:

        • reinit_vector; and only one of
        • residual; or
        • iteration_function;
        -

        Specifying residual() allows the user to use Newton and Picard strategies (i.e., $F(u)=0$ will be solved), while specifying iteration_function(), a fixed point iteration will be used (i.e., $G(u)=u$ will be solved). An error will be thrown if iteration_function() is set for Picard or Newton.

        +

        Specifying residual() allows the user to use Newton and Picard strategies (i.e., $F(u)=0$ will be solved), while specifying iteration_function(), a fixed point iteration will be used (i.e., $G(u)=u$ will be solved). An error will be thrown if iteration_function() is set for Picard or Newton.

        If the use of a Newton or Picard method is desired, then the user should also supply

        • solve_jacobian_system or solve_with_jacobian; and optionally
        • setup_jacobian;
        • @@ -440,13 +440,13 @@

    A function object that users may supply and that is intended to prepare the linear solver for subsequent calls to solve_with_jacobian().

    The job of setup_jacobian() is to prepare the linear solver for subsequent calls to solve_with_jacobian(), in the solution of linear systems $Ax = b$. The exact nature of this system depends on the SolutionStrategy that has been selected.

    -

    In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial
-F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$. If strategy = SolutionStrategy::fixed_point, then linear systems do not arise, and this function is never called.

    +

    In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial
+F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$. If strategy = SolutionStrategy::fixed_point, then linear systems do not arise, and this function is never called.

    The setup_jacobian() function may call a user-supplied function, or a function within the linear solver module, to compute Jacobian-related data that is required by the linear solver. It may also preprocess that data as needed for solve_with_jacobian(), which may involve calling a generic function (such as for LU factorization) or, more generally, build preconditioners from the assembled Jacobian. In any case, the data so generated may then be used whenever a linear system is solved.

    The point of this function is that setup_jacobian() function is not called at every Newton iteration, but only as frequently as the solver determines that it is appropriate to perform the setup task. In this way, Jacobian-related data generated by setup_jacobian() is expected to be used over a number of Newton iterations. KINSOL determines itself when it is beneficial to regenerate the Jacobian and associated information (such as preconditioners computed for the Jacobian), thereby saving the effort to regenerate the Jacobian matrix and a preconditioner for it whenever possible.

    Parameters
    - +
    current_uCurrent value of $u$
    current_uCurrent value of $u$
    current_fCurrent value of $F(u)$ or $G(u)$
    @@ -473,14 +473,14 @@
    Deprecated:
    Versions of SUNDIALS after 4.0 no longer provide all of the information necessary for this callback (see below). Use the solve_with_jacobian callback described below.

    A function object that users may supply and that is intended to solve a linear system with the Jacobian matrix. This function will be called by KINSOL (possibly several times) after setup_jacobian() has been called at least once. KINSOL tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then KINSOL does not call setup_jacobian() again. If, on the contrary, internal KINSOL convergence tests fail, then KINSOL calls setup_jacobian() again with updated vectors and coefficients so that successive calls to solve_jacobian_system() lead to better convergence in the Newton process.

    If you do not specify a solve_jacobian_system or solve_with_jacobian function, then only a fixed point iteration strategy can be used. Notice that this may not converge, or may converge very slowly.

    -

    A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above).

    +

    A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above).

    Arguments to the function are:

    Parameters
    - - + + - +
    [in]ycurThe current $y$ vector for the current KINSOL internal step. In the documentation above, this $y$ vector is generally denoted by $u$.
    [in]fcurThe current value of the implicit right-hand side at ycur, $f_I (t_n, ypred)$.
    [in]ycurThe current $y$ vector for the current KINSOL internal step. In the documentation above, this $y$ vector is generally denoted by $u$.
    [in]fcurThe current value of the implicit right-hand side at ycur, $f_I (t_n, ypred)$.
    [in]rhsThe system right hand side to solve for
    [out]dstThe solution of $J^{-1} * src$
    [out]dstThe solution of $J^{-1} * src$
    @@ -511,12 +511,12 @@

    A function object that users may supply and that is intended to solve a linear system with the Jacobian matrix. This function will be called by KINSOL (possibly several times) after setup_jacobian() has been called at least once. KINSOL tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then KINSOL does not call setup_jacobian() again. If, on the contrary, internal KINSOL convergence tests fail, then KINSOL calls setup_jacobian() again with updated vectors and coefficients so that successive calls to solve_with_jacobian() lead to better convergence in the Newton process.

    If you do not specify a solve_with_jacobian function, then only a fixed point iteration strategy can be used. Notice that this may not converge, or may converge very slowly.

    -

    A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above). The function attached to this callback is also provided with a tolerance to the linear solver, indicating that it is not necessary to solve the linear system with the Jacobian matrix exactly, but only to a tolerance that KINSOL will adapt over time.

    +

    A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above). The function attached to this callback is also provided with a tolerance to the linear solver, indicating that it is not necessary to solve the linear system with the Jacobian matrix exactly, but only to a tolerance that KINSOL will adapt over time.

    Arguments to the function are:

    Parameters
    - +
    [in]rhsThe system right hand side to solve for.
    [out]dstThe solution of $J^{-1} * src$.
    [out]dstThe solution of $J^{-1} * src$.
    [in]toleranceThe tolerance with which to solve the linear system of equations.
    @@ -541,7 +541,7 @@

    A function object that users may supply and that is intended to return a vector whose components are the weights used by KINSOL to compute the vector norm of the solution. The implementation of this function is optional, and it is used only if implemented.

    -

    The intent for this scaling factor is for problems in which the different components of a solution have vastly different numerical magnitudes – typically because they have different physical units and represent different things. For example, if one were to solve a nonlinear Stokes problem, the solution vector has components that correspond to velocities and other components that correspond to pressures. These have different physical units and depending on which units one chooses, they may have roughly comparable numerical sizes or maybe they don't. To give just one example, in simulations of flow in the Earth's interior, one has velocities on the order of maybe ten centimeters per year, and pressures up to around 100 GPa. If one expresses this in SI units, this corresponds to velocities of around $0.000,000,003=3 \times 10^{-9}$ m/s, and pressures around $10^9 \text{kg}/\text{m}/\text{s}^2$, i.e., vastly different. In such cases, computing the $l_2$ norm of a solution-type vector (e.g., the difference between the previous and the current solution) makes no sense because the norm will either be dominated by the velocity components or the pressure components. The scaling vector this function returns is intended to provide each component of the solution with a scaling factor that is generally chosen as the inverse of a "typical velocity" or "typical pressure" so that upon multiplication of a vector component by the corresponding scaling vector component, one obtains a number that is of order of magnitude of one (i.e., a reasonably small multiple of one times the typical velocity/pressure). The KINSOL manual states this as follows: "The user should supply values \_form#href_anchor".

    +

    The intent for this scaling factor is for problems in which the different components of a solution have vastly different numerical magnitudes – typically because they have different physical units and represent different things. For example, if one were to solve a nonlinear Stokes problem, the solution vector has components that correspond to velocities and other components that correspond to pressures. These have different physical units and depending on which units one chooses, they may have roughly comparable numerical sizes or maybe they don't. To give just one example, in simulations of flow in the Earth's interior, one has velocities on the order of maybe ten centimeters per year, and pressures up to around 100 GPa. If one expresses this in SI units, this corresponds to velocities of around $0.000,000,003=3 \times 10^{-9}$ m/s, and pressures around $10^9 \text{kg}/\text{m}/\text{s}^2$, i.e., vastly different. In such cases, computing the $l_2$ norm of a solution-type vector (e.g., the difference between the previous and the current solution) makes no sense because the norm will either be dominated by the velocity components or the pressure components. The scaling vector this function returns is intended to provide each component of the solution with a scaling factor that is generally chosen as the inverse of a "typical velocity" or "typical pressure" so that upon multiplication of a vector component by the corresponding scaling vector component, one obtains a number that is of order of magnitude of one (i.e., a reasonably small multiple of one times the typical velocity/pressure). The KINSOL manual states this as follows: "The user should supply values \_form#href_anchor".

    If no function is provided to a KINSOL object, then this is interpreted as implicitly saying that all of these scaling factors should be considered as one.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -563,7 +563,7 @@

    A function object that users may supply and that is intended to return a vector whose components are the weights used by KINSOL to compute the vector norm of the function evaluation away from the solution. The implementation of this function is optional, and it is used only if implemented.

    -

    The point of this function and the scaling vector it returns is similar to the one discussed above for get_solution_scaling, except that it is for a vector that scales the components of the function $F(U)$, rather than the components of $U$, when computing norms. As above, if no function is provided, then this is equivalent to using a scaling vector whose components are all equal to one.

    +

    The point of this function and the scaling vector it returns is similar to the one discussed above for get_solution_scaling, except that it is for a vector that scales the components of the function $F(U)$, rather than the components of $U$, when computing norms. As above, if no function is provided, then this is equivalent to using a scaling vector whose components are all equal to one.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 691 of file kinsol.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classScaLAPACKMatrix.html differs (JavaScript source, ASCII text, with very long lines (1291)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classScaLAPACKMatrix.html 2023-11-25 15:25:58.359921237 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classScaLAPACKMatrix.html 2023-11-25 15:25:58.363254500 +0100 @@ -358,15 +358,15 @@

    Detailed Description

    template<typename NumberType>
    class ScaLAPACKMatrix< NumberType >

    A wrapper class around ScaLAPACK parallel dense linear algebra.

    -

    ScaLAPACK assumes that matrices are distributed according to the block-cyclic decomposition scheme. An $M$ by $N$ matrix is first decomposed into $\lceil M / MB \rceil$ by $\lceil N / NB \rceil$ blocks which are then uniformly distributed across the 2d process grid with $p q \le Np$ processes, where $p,q$ are grid dimensions and $Np$ is the total number of processes. The parameters MB and NB are referred to as row and column block size and determine the granularity of the block-cyclic distribution.

    -

    In the following the block-cyclic distribution of a $10 \times 9$ matrix onto a $3\times 3$ Cartesian process grid with block sizes $\text{MB}=\text{NB}=2$ is displayed.

    +

    ScaLAPACK assumes that matrices are distributed according to the block-cyclic decomposition scheme. An $M$ by $N$ matrix is first decomposed into $\lceil M / MB \rceil$ by $\lceil N / NB \rceil$ blocks which are then uniformly distributed across the 2d process grid with $p q \le Np$ processes, where $p,q$ are grid dimensions and $Np$ is the total number of processes. The parameters MB and NB are referred to as row and column block size and determine the granularity of the block-cyclic distribution.

    +

    In the following the block-cyclic distribution of a $10 \times 9$ matrix onto a $3\times 3$ Cartesian process grid with block sizes $\text{MB}=\text{NB}=2$ is displayed.

    Block-Cyclic Distribution
    -

    Note that the odd number of columns of the local matrices owned by the processes P2, P5 and P8 accounts for $N=9$ not being an integral multiple of $\text{NB}=2$.

    +

    Note that the odd number of columns of the local matrices owned by the processes P2, P5 and P8 accounts for $N=9$ not being an integral multiple of $\text{NB}=2$.

    The choice of the block sizes is a compromise between a sufficiently large size for efficient local/serial BLAS, but one that is also small enough to achieve good parallel load balance.

    Below we show a strong scaling example of ScaLAPACKMatrix::invert() on up to 5 nodes each composed of two Intel Xeon 2660v2 IvyBridge sockets 2.20GHz, 10 cores/socket. Calculations are performed on square processor grids 1x1, 2x2, 3x3, 4x4, 5x5, 6x6, 7x7, 8x8, 9x9, 10x10.

    @@ -621,7 +621,7 @@

    Constructor for a rectangular matrix with n_rows and n_cols and distributed using the grid process_grid.

    -

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 81 of file scalapack.cc.

    @@ -666,7 +666,7 @@

    Constructor for a square matrix of size size, and distributed using the process grid in process_grid.

    -

    The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 106 of file scalapack.cc.

    @@ -711,7 +711,7 @@

    Constructor for a general rectangular matrix that is read from the file filename and distributed using the grid process_grid.

    Loads the matrix from file filename using HDF5. In case that deal.II was built without HDF5 a call to this function will cause an exception to be thrown.

    -

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 122 of file scalapack.cc.

    @@ -797,7 +797,7 @@

    Initialize the rectangular matrix with n_rows and n_cols and distributed using the grid process_grid.

    -

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 217 of file scalapack.cc.

    @@ -842,7 +842,7 @@

    Initialize the square matrix of size size and distributed using the grid process_grid.

    -

    The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 291 of file scalapack.cc.

    @@ -1105,9 +1105,9 @@
    -

    Transposing assignment: $\mathbf{A} = \mathbf{B}^T$

    -

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    -

    The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    +

    Transposing assignment: $\mathbf{A} = \mathbf{B}^T$

    +

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    +

    The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    Definition at line 981 of file scalapack.cc.

    @@ -1155,13 +1155,13 @@ transpose_B Block Sizes Operation -false $MB_A=MB_B$
    - $NB_A=NB_B$ $\mathbf{A} = a \mathbf{A} + b \mathbf{B}$ +false $MB_A=MB_B$
    + $NB_A=NB_B$ $\mathbf{A} = a \mathbf{A} + b \mathbf{B}$ -true $MB_A=NB_B$
    - $NB_A=MB_B$ $\mathbf{A} = a \mathbf{A} + b \mathbf{B}^T$ +true $MB_A=NB_B$
    + $NB_A=MB_B$ $\mathbf{A} = a \mathbf{A} + b \mathbf{B}^T$ -

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    +

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    Definition at line 991 of file scalapack.cc.

    @@ -1192,9 +1192,9 @@
    -

    Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}$

    -

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    -

    The following alignment conditions have to be fulfilled: $MB_A=MB_B$ and $NB_A=NB_B$.

    +

    Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}$

    +

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    +

    The following alignment conditions have to be fulfilled: $MB_A=MB_B$ and $NB_A=NB_B$.

    Definition at line 1047 of file scalapack.cc.

    @@ -1225,9 +1225,9 @@
    -

    Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}^T$

    -

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    -

    The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    +

    Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}^T$

    +

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    +

    The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    Definition at line 1057 of file scalapack.cc.

    @@ -1285,24 +1285,24 @@ transpose_A transpose_B Block Sizes Operation -false false $MB_A=MB_C$
    - $NB_A=MB_B$
    - $NB_B=NB_C$ $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B} + c \mathbf{C}$ +false false $MB_A=MB_C$
    + $NB_A=MB_B$
    + $NB_B=NB_C$ $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B} + c \mathbf{C}$ -false true $MB_A=MB_C$
    - $NB_A=NB_B$
    - $MB_B=NB_C$ $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B}^T + c \mathbf{C}$ +false true $MB_A=MB_C$
    + $NB_A=NB_B$
    + $MB_B=NB_C$ $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B}^T + c \mathbf{C}$ -true false $MB_A=MB_B$
    - $NB_A=MB_C$
    - $NB_B=NB_C$ $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B} + c \mathbf{C}$ +true false $MB_A=MB_B$
    + $NB_A=MB_C$
    + $NB_B=NB_C$ $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B} + c \mathbf{C}$ -true true $MB_A=NB_B$
    - $NB_A=MB_C$
    - $MB_B=NB_C$ $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B}^T + c \mathbf{C}$ +true true $MB_A=NB_B$
    + $NB_A=MB_C$
    + $MB_B=NB_C$ $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B}^T + c \mathbf{C}$ -

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    -

    The matrices $\mathbf{A}$, $\mathbf{B}$ and $\mathbf{C}$ must have the same process grid.

    +

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    +

    The matrices $\mathbf{A}$, $\mathbf{B}$ and $\mathbf{C}$ must have the same process grid.

    Definition at line 1067 of file scalapack.cc.

    @@ -1339,11 +1339,11 @@

    Matrix-matrix-multiplication.

    -

    The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

    -

    if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A} \cdot \mathbf{B}$

    -

    else $\mathbf{C} = \mathbf{A} \cdot \mathbf{B}$

    -

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    -

    The following alignment conditions have to be fulfilled: $MB_A=MB_C$, $NB_A=MB_B$ and $NB_B=NB_C$.

    +

    The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

    +

    if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A} \cdot \mathbf{B}$

    +

    else $\mathbf{C} = \mathbf{A} \cdot \mathbf{B}$

    +

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    +

    The following alignment conditions have to be fulfilled: $MB_A=MB_C$, $NB_A=MB_B$ and $NB_B=NB_C$.

    Definition at line 1184 of file scalapack.cc.

    @@ -1380,11 +1380,11 @@

    Matrix-matrix-multiplication using transpose of $\mathbf{A}$.

    -

    The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

    -

    if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A}^T \cdot \mathbf{B}$

    -

    else $\mathbf{C} = \mathbf{A}^T \cdot \mathbf{B}$

    -

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    -

    The following alignment conditions have to be fulfilled: $MB_A=MB_B$, $NB_A=MB_C$ and $NB_B=NB_C$.

    +

    The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

    +

    if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A}^T \cdot \mathbf{B}$

    +

    else $\mathbf{C} = \mathbf{A}^T \cdot \mathbf{B}$

    +

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    +

    The following alignment conditions have to be fulfilled: $MB_A=MB_B$, $NB_A=MB_C$ and $NB_B=NB_C$.

    Definition at line 1198 of file scalapack.cc.

    @@ -1420,12 +1420,12 @@ /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classScalarFunctionFromFunctionObject.html differs (JavaScript source, ASCII text, with very long lines (4365)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 2023-11-25 15:25:58.383254093 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 2023-11-25 15:25:58.383254093 +0100 @@ -257,7 +257,7 @@
    Vector<double> solution_1d;
    Definition: vector.h:109
    -

    We will denote this solution function described by this DoFHandler and vector object by $u_h(x)$ where $x$ is a vector with just one component, and consequently is not shown in boldface. Then assume that we want this $u_h(x)$ to be used as a boundary condition for a 2d problem at the line $y=0$. Let's say that this line corresponds to boundary indicator 123. If we say that the 2d problem is associated with

    DoFHandler<2> dof_handler_2d;
    +

    We will denote this solution function described by this DoFHandler and vector object by $u_h(x)$ where $x$ is a vector with just one component, and consequently is not shown in boldface. Then assume that we want this $u_h(x)$ to be used as a boundary condition for a 2d problem at the line $y=0$. Let's say that this line corresponds to boundary indicator 123. If we say that the 2d problem is associated with

    DoFHandler<2> dof_handler_2d;

    then in order to evaluate the boundary conditions for this 2d problem, we would want to call VectorTools::interpolate_boundary_values() via

    AffineConstraints<double> boundary_values_2d;
    123,
    @@ -265,7 +265,7 @@
    boundary_values_2d);
    void interpolate_boundary_values(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > * > &function_map, std::map< types::global_dof_index, number > &boundary_values, const ComponentMask &component_mask=ComponentMask())
    -

    The question here is what to use as the Function object that can be passed as third argument. It needs to be a Function<2> object, i.e., it receives a 2d input point and is supposed to return the value at that point. What we want it to do is to just take the $x$ component of the input point and evaluate the 1d solution at that point, knowing that at the boundary with indicator 123, the $y$ component of the input point must be zero. This all can be achieved via the following function object:

    The question here is what to use as the Function object that can be passed as third argument. It needs to be a Function<2> object, i.e., it receives a 2d input point and is supposed to return the value at that point. What we want it to do is to just take the $x$ component of the input point and evaluate the 1d solution at that point, knowing that at the boundary with indicator 123, the $y$ component of the input point must be zero. This all can be achieved via the following function object:

    solution_1d_as_function_object (dof_handler_1d, solution_1d);
    auto boundary_evaluator
    = [&] (const Point<2> &p)
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolutionTransfer.html differs (JavaScript source, ASCII text, with very long lines (999)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolutionTransfer.html 2023-11-25 15:25:58.399920423 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolutionTransfer.html 2023-11-25 15:25:58.399920423 +0100 @@ -278,7 +278,7 @@

    Interaction with hanging nodes

    -

    This class does its best to represent on the new mesh the finite element function that existed on the old mesh, but this may lead to situations where the function on the new mesh is no longer conforming at hanging nodes. To this end, consider a situation of a twice refined mesh that started with a single square cell (i.e., we now have 16 cells). Consider also that we coarsen 4 of the cells back to the first refinement level. In this case, we end up with a mesh that will look as follows if we were to use a $Q_1$ element:

    +

    This class does its best to represent on the new mesh the finite element function that existed on the old mesh, but this may lead to situations where the function on the new mesh is no longer conforming at hanging nodes. To this end, consider a situation of a twice refined mesh that started with a single square cell (i.e., we now have 16 cells). Consider also that we coarsen 4 of the cells back to the first refinement level. In this case, we end up with a mesh that will look as follows if we were to use a $Q_1$ element:

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverBFGS.html differs (JavaScript source, ASCII text, with very long lines (1094)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverBFGS.html 2023-11-25 15:25:58.416586750 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverBFGS.html 2023-11-25 15:25:58.416586750 +0100 @@ -211,7 +211,7 @@ \rho^{(k)} &\dealcoloneq \frac{1}{y^{(k)} \cdot s^{(k)}} \end{align*}" src="form_2417.png"/>

    -

    for a symmetric positive definite $H$. Limited memory variant is implemented via the two-loop recursion.

    +

    for a symmetric positive definite $H$. Limited memory variant is implemented via the two-loop recursion.

    Definition at line 58 of file solver_bfgs.h.

    Member Typedef Documentation

    @@ -372,8 +372,8 @@ \]" src="form_2418.png"/>

    starting from initial state x.

    -

    The function compute takes two arguments indicating the values of $x$ and of the gradient $g=\nabla f(\mathbf x)=\frac{\partial f}{\partial
-\mathbf x}$. When called, it needs to update the gradient $g$ at the given location $x$ and return the value of the function being minimized, i.e., $f(\mathbf x)$.

    +

    The function compute takes two arguments indicating the values of $x$ and of the gradient $g=\nabla f(\mathbf x)=\frac{\partial f}{\partial
+\mathbf x}$. When called, it needs to update the gradient $g$ at the given location $x$ and return the value of the function being minimized, i.e., $f(\mathbf x)$.

    @@ -395,7 +395,7 @@

    Connect a slot to perform a custom line-search.

    -

    Given the value of function f, the current value of unknown x, the gradient g and the search direction p, return the size $\alpha$ of the step $x \leftarrow x + \alpha p$, and update x, g and f accordingly.

    +

    Given the value of function f, the current value of unknown x, the gradient g and the search direction p, return the size $\alpha$ of the step $x \leftarrow x + \alpha p$, and update x, g and f accordingly.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverCG.html differs (JavaScript source, ASCII text, with very long lines (1094)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverCG.html 2023-11-25 15:25:58.433253075 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverCG.html 2023-11-25 15:25:58.433253075 +0100 @@ -477,7 +477,7 @@
    -

    Solve the linear system $Ax=b$ for x.

    +

    Solve the linear system $Ax=b$ for x.

    @@ -1115,7 +1115,7 @@
    -

    Flag to indicate whether the classical Fletcher–Reeves update formula for the parameter $\beta_k$ (standard CG algorithm, minimal storage needs) or the flexible conjugate gradient method with Polak-Ribiere formula for $\beta_k$ should be used. This base class implementation of SolverCG will always use the former method, whereas the derived class SolverFlexibleCG will use the latter.

    +

    Flag to indicate whether the classical Fletcher–Reeves update formula for the parameter $\beta_k$ (standard CG algorithm, minimal storage needs) or the flexible conjugate gradient method with Polak-Ribiere formula for $\beta_k$ should be used. This base class implementation of SolverCG will always use the former method, whereas the derived class SolverFlexibleCG will use the latter.

    Definition at line 323 of file solver_cg.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFGMRES.html differs (JavaScript source, ASCII text, with very long lines (1094)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFGMRES.html 2023-11-25 15:25:58.449919405 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFGMRES.html 2023-11-25 15:25:58.449919405 +0100 @@ -364,7 +364,7 @@
    -

    Solve the linear system $Ax=b$ for x.

    +

    Solve the linear system $Ax=b$ for x.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFIRE.html differs (JavaScript source, ASCII text, with very long lines (1274)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFIRE.html 2023-11-25 15:25:58.466585733 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFIRE.html 2023-11-25 15:25:58.466585733 +0100 @@ -195,27 +195,27 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    -class SolverFIRE< VectorType >

    FIRE (Fast Inertial Relaxation Engine) for minimization of (potentially non-linear) objective function $E(\mathbf x)$, $\mathbf x$ is a vector of $n$ variables ( $n$ is the number of variables of the objective function). Like all other solver classes, it can work on any kind of vector and matrix as long as they satisfy certain requirements (for the requirements on matrices and vectors in order to work with this class, see the documentation of the Solver base class). The type of the solution vector must be passed as template argument, and defaults to Vector<double>.

    +class SolverFIRE< VectorType >

    FIRE (Fast Inertial Relaxation Engine) for minimization of (potentially non-linear) objective function $E(\mathbf x)$, $\mathbf x$ is a vector of $n$ variables ( $n$ is the number of variables of the objective function). Like all other solver classes, it can work on any kind of vector and matrix as long as they satisfy certain requirements (for the requirements on matrices and vectors in order to work with this class, see the documentation of the Solver base class). The type of the solution vector must be passed as template argument, and defaults to Vector<double>.

    FIRE is a damped dynamics method described in Structural Relaxation Made Simple by Bitzek et al. 2006, typically used to find stable equilibrium configurations of atomistic systems in computational material science. Starting from a given initial configuration of the atomistic system, the algorithm relies on inertia to obtain (nearest) configuration with least potential energy.

    Notation:

    -

    Given initial values for $\Delta t$, $\alpha = \alpha_0$, $\epsilon$, $\mathbf x = \mathbf x_0$ and $\mathbf v= \mathbf 0$ along with a given mass matrix $\mathbf M$, FIRE algorithm is as follows,

      -
    1. Calculate $\mathbf g = \nabla E(\mathbf x)$ and check for convergence ( $\mathbf g \cdot \mathbf g < \epsilon^2 $).
    2. -
    3. Update $\mathbf x$ and $V$ using simple (forward) Euler integration step,
      - $\mathbf x = \mathbf x + \Delta t \mathbf v$,
      - $\mathbf v = \mathbf v + \Delta t \mathbf M^{-1} \cdot \mathbf g$.
    4. -
    5. Calculate $p = \mathbf g \cdot \mathbf v$.
    6. -
    7. Set $\mathbf v = (1-\alpha) \mathbf v
-                  + \alpha \frac{|\mathbf v|}{|\mathbf g|} \mathbf g$.
    8. -
    9. If $p<0$ and number of steps since $p$ was last negative is larger than certain value, then increase time step $\Delta t$ and decrease $\alpha$.
    10. -
    11. If $p>0$, then decrease the time step, freeze the system i.e., $\mathbf v = \mathbf 0$ and reset $\alpha = \alpha_0$.
    12. +

      Given initial values for $\Delta t$, $\alpha = \alpha_0$, $\epsilon$, $\mathbf x = \mathbf x_0$ and $\mathbf v= \mathbf 0$ along with a given mass matrix $\mathbf M$, FIRE algorithm is as follows,

        +
      1. Calculate $\mathbf g = \nabla E(\mathbf x)$ and check for convergence ( $\mathbf g \cdot \mathbf g < \epsilon^2 $).
      2. +
      3. Update $\mathbf x$ and $V$ using simple (forward) Euler integration step,
        + $\mathbf x = \mathbf x + \Delta t \mathbf v$,
        + $\mathbf v = \mathbf v + \Delta t \mathbf M^{-1} \cdot \mathbf g$.
      4. +
      5. Calculate $p = \mathbf g \cdot \mathbf v$.
      6. +
      7. Set $\mathbf v = (1-\alpha) \mathbf v
+                  + \alpha \frac{|\mathbf v|}{|\mathbf g|} \mathbf g$.
      8. +
      9. If $p<0$ and number of steps since $p$ was last negative is larger than certain value, then increase time step $\Delta t$ and decrease $\alpha$.
      10. +
      11. If $p>0$, then decrease the time step, freeze the system i.e., $\mathbf v = \mathbf 0$ and reset $\alpha = \alpha_0$.
      12. Return to 1.

      Also see Energy-Minimization in Atomic-to-Continuum Scale-Bridging Methods by Eidel et al. 2011.

      @@ -426,8 +426,8 @@
      -

      Solve for x that minimizes $E(\mathbf x)$ for the special case when $E(\mathbf x)
-= \frac{1}{2} \mathbf x^{T} \mathbf A \mathbf x - \mathbf x^{T} \mathbf b$.

      +

      Solve for x that minimizes $E(\mathbf x)$ for the special case when $E(\mathbf x)
+= \frac{1}{2} \mathbf x^{T} \mathbf A \mathbf x - \mathbf x^{T} \mathbf b$.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFlexibleCG.html differs (JavaScript source, ASCII text, with very long lines (1120)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFlexibleCG.html 2023-11-25 15:25:58.479918793 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverFlexibleCG.html 2023-11-25 15:25:58.483252061 +0100 @@ -216,11 +216,11 @@

      Detailed Description

      template<typename VectorType = Vector<double>>
      -class SolverFlexibleCG< VectorType >

      This class implements a flexible variant of the conjugate gradient method, which is based on a different formula to compute $\beta_k$ in the process of constructing a new search direction that is A-orthogonal against the previous one. Rather than using the Fletcher–Reeves update formula with $\beta_k = \frac{\mathbf{r}^T_{k+1} \mathbf{z}_{k+1}}{\mathbf{r}^T_{k}
-\mathbf{z}_{k}}$ for computing the new search direction (here $\mathbf{r}_{k+1}$ is the residual in step $k+1$ and $\mathbf{z}_{k+1} =
-P^{-1} \mathbf{r}_{k+1}$) as in the classical conjugate gradient algorithm, this class selects the Polak-Ribiere formula $\beta_k =
+class SolverFlexibleCG< VectorType ></div><p>This class implements a flexible variant of the conjugate gradient method, which is based on a different formula to compute <picture><source srcset=$\beta_k$ in the process of constructing a new search direction that is A-orthogonal against the previous one. Rather than using the Fletcher–Reeves update formula with $\beta_k = \frac{\mathbf{r}^T_{k+1} \mathbf{z}_{k+1}}{\mathbf{r}^T_{k}
+\mathbf{z}_{k}}$ for computing the new search direction (here $\mathbf{r}_{k+1}$ is the residual in step $k+1$ and $\mathbf{z}_{k+1} =
+P^{-1} \mathbf{r}_{k+1}$) as in the classical conjugate gradient algorithm, this class selects the Polak-Ribiere formula $\beta_k =
 \frac{\mathbf{r}^T_{k+1} \left(\mathbf{z}_{k+1} -
-\mathbf{z}_{k}\right)}{\mathbf{r}^T_{k} \mathbf{z}_{k}}$. The additional term $\mathbf{r}^T_{k+1} \mathbf{z}_{k}$ is zero for linear symmetric-positive definite preconditioners due to the construction of the search directions, so the behavior of SolverFlexibleCG is equivalent for those kinds of situations and merely increases costs by requiring an additional stored vector and associated vector operations. While there are no theoretical guarantees for convergence as in the classical CG algorithm, the current class has been documented to be much more robust for variable preconditioners (e.g., involving some iterative inverse that is not fully converged) or a preconditioner with some slight non-symmetry (like weighted Schwarz methods), which results from the local optimality of the search direction with at least as good progress as the locally optimal steepest descent method.

      +\mathbf{z}_{k}\right)}{\mathbf{r}^T_{k} \mathbf{z}_{k}}$" src="form_1891.png"/>. The additional term $\mathbf{r}^T_{k+1} \mathbf{z}_{k}$ is zero for linear symmetric-positive definite preconditioners due to the construction of the search directions, so the behavior of SolverFlexibleCG is equivalent for those kinds of situations and merely increases costs by requiring an additional stored vector and associated vector operations. While there are no theoretical guarantees for convergence as in the classical CG algorithm, the current class has been documented to be much more robust for variable preconditioners (e.g., involving some iterative inverse that is not fully converged) or a preconditioner with some slight non-symmetry (like weighted Schwarz methods), which results from the local optimality of the search direction with at least as good progress as the locally optimal steepest descent method.

      Definition at line 354 of file solver_cg.h.

      Member Typedef Documentation

      @@ -417,7 +417,7 @@
      -

      Solve the linear system $Ax=b$ for x.

      +

      Solve the linear system $Ax=b$ for x.

      @@ -1079,7 +1079,7 @@
      -

      Flag to indicate whether the classical Fletcher–Reeves update formula for the parameter $\beta_k$ (standard CG algorithm, minimal storage needs) or the flexible conjugate gradient method with Polak-Ribiere formula for $\beta_k$ should be used. This base class implementation of SolverCG will always use the former method, whereas the derived class SolverFlexibleCG will use the latter.

      +

      Flag to indicate whether the classical Fletcher–Reeves update formula for the parameter $\beta_k$ (standard CG algorithm, minimal storage needs) or the flexible conjugate gradient method with Polak-Ribiere formula for $\beta_k$ should be used. This base class implementation of SolverCG will always use the former method, whereas the derived class SolverFlexibleCG will use the latter.

      Definition at line 323 of file solver_cg.h.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverGMRES.html differs (JavaScript source, ASCII text, with very long lines (1094)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverGMRES.html 2023-11-25 15:25:58.499918386 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverGMRES.html 2023-11-25 15:25:58.499918386 +0100 @@ -446,7 +446,7 @@
      -

      Solve the linear system $Ax=b$ for x.

      +

      Solve the linear system $Ax=b$ for x.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverMinRes.html differs (JavaScript source, ASCII text, with very long lines (1094)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverMinRes.html 2023-11-25 15:25:58.516584715 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverMinRes.html 2023-11-25 15:25:58.516584715 +0100 @@ -403,7 +403,7 @@
      -

      Solve the linear system $Ax=b$ for x.

      +

      Solve the linear system $Ax=b$ for x.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverQMRS.html differs (JavaScript source, ASCII text, with very long lines (1094)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverQMRS.html 2023-11-25 15:25:58.533251043 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverQMRS.html 2023-11-25 15:25:58.533251043 +0100 @@ -374,7 +374,7 @@
      -

      Solve the linear system $Ax=b$ for x.

      +

      Solve the linear system $Ax=b$ for x.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverRichardson.html differs (JavaScript source, ASCII text, with very long lines (1094)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverRichardson.html 2023-11-25 15:25:58.546584104 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSolverRichardson.html 2023-11-25 15:25:58.546584104 +0100 @@ -404,7 +404,7 @@
      -

      Solve the linear system $Ax=b$ for x.

      +

      Solve the linear system $Ax=b$ for x.

      @@ -448,7 +448,7 @@
      -

      Solve $A^Tx=b$ for $x$.

      +

      Solve $A^Tx=b$ for $x$.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseDirectUMFPACK.html differs (JavaScript source, ASCII text, with very long lines (708)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseDirectUMFPACK.html 2023-11-25 15:25:58.566583697 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseDirectUMFPACK.html 2023-11-25 15:25:58.566583697 +0100 @@ -617,8 +617,8 @@

      The solution will be returned in place of the right hand side vector.

      Parameters
      - - + +
      [in,out]rhs_and_solutionA vector that contains the right hand side $b$ of a linear system $Ax=b$ upon calling this function, and that contains the solution $x$ of the linear system after calling this function.
      [in]transposeIf set to true, this function solves the linear $A^T x = b$ instead of $Ax=b$.
      [in,out]rhs_and_solutionA vector that contains the right hand side $b$ of a linear system $Ax=b$ upon calling this function, and that contains the solution $x$ of the linear system after calling this function.
      [in]transposeIf set to true, this function solves the linear $A^T x = b$ instead of $Ax=b$.
      @@ -652,7 +652,7 @@

      Like the previous function, but for a complex-valued right hand side and solution vector.

      -

      If the matrix that was previously factorized had complex-valued entries, then the rhs_and_solution vector will, upon return from this function, simply contain the solution of the linear system $Ax=b$. If the matrix was real-valued, then this is also true, but the solution will simply be computed by applying the factorized $A^{-1}$ to both the real and imaginary parts of the right hand side vector.

      +

      If the matrix that was previously factorized had complex-valued entries, then the rhs_and_solution vector will, upon return from this function, simply contain the solution of the linear system $Ax=b$. If the matrix was real-valued, then this is also true, but the solution will simply be computed by applying the factorized $A^{-1}$ to both the real and imaginary parts of the right hand side vector.

      Definition at line 418 of file sparse_direct.cc.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseILU.html differs (JavaScript source, ASCII text, with very long lines (670)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseILU.html 2023-11-25 15:25:58.596583085 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseILU.html 2023-11-25 15:25:58.596583085 +0100 @@ -2086,7 +2086,7 @@
      -

      Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

      +

      Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

      This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by SparsityPattern::symmetrize().

      @@ -2381,7 +2381,7 @@
      -

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      +

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

      The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
      @@ -2423,7 +2423,7 @@
      -

      Compute the matrix scalar product $\left(u,Mv\right)$.

      +

      Compute the matrix scalar product $\left(u,Mv\right)$.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
      @@ -2518,7 +2518,7 @@

      Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

      This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

      -

      By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

      +

      By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

      When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

      As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

      @@ -2598,8 +2598,8 @@
      -

      Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

      +

      Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

      @@ -2627,8 +2627,8 @@
      -

      Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseLUDecomposition.html differs (JavaScript source, ASCII text, with very long lines (706)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseLUDecomposition.html 2023-11-25 15:25:58.629915742 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseLUDecomposition.html 2023-11-25 15:25:58.629915742 +0100 @@ -1854,7 +1854,7 @@
      -

      Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

      +

      Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

      This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by SparsityPattern::symmetrize().

      @@ -2252,7 +2252,7 @@
      -

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      +

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

      The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
      @@ -2294,7 +2294,7 @@
      -

      Compute the matrix scalar product $\left(u,Mv\right)$.

      +

      Compute the matrix scalar product $\left(u,Mv\right)$.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
      @@ -2389,7 +2389,7 @@

      Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

      This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

      -

      By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

      +

      By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

      When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

      As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

      @@ -2469,8 +2469,8 @@
      -

      Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

      +

      Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

      @@ -2498,8 +2498,8 @@
      -

      Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMIC.html differs (JavaScript source, ASCII text, with very long lines (670)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMIC.html 2023-11-25 15:25:58.663248397 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMIC.html 2023-11-25 15:25:58.663248397 +0100 @@ -425,8 +425,8 @@
      template<typename number>
      class SparseMIC< number >

      Implementation of the Modified Incomplete Cholesky (MIC(0)) preconditioner for symmetric matrices. This class conforms to the state and usage specification in SparseLUDecomposition.

      The decomposition

      -

      Let a symmetric, positive-definite, sparse matrix $A$ be in the form $A = D
-- L - L^T$, where $D$ is the diagonal part of $A$ and $-L$ is a strictly lower triangular matrix. The MIC(0) decomposition of the matrix $A$ is defined by $B = (X-L)X^{-1}(X-L^T)$, where $X$ is a diagonal matrix defined by the condition $\text{rowsum}(A) = \text{rowsum}(B)$.

      +

      Let a symmetric, positive-definite, sparse matrix $A$ be in the form $A = D
+- L - L^T$, where $D$ is the diagonal part of $A$ and $-L$ is a strictly lower triangular matrix. The MIC(0) decomposition of the matrix $A$ is defined by $B = (X-L)X^{-1}(X-L^T)$, where $X$ is a diagonal matrix defined by the condition $\text{rowsum}(A) = \text{rowsum}(B)$.

      Definition at line 46 of file sparse_mic.h.

      Member Typedef Documentation

      @@ -2152,7 +2152,7 @@
      -

      Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

      +

      Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

      This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by SparsityPattern::symmetrize().

      @@ -2447,7 +2447,7 @@
      -

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      +

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

      The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
      @@ -2489,7 +2489,7 @@
      -

      Compute the matrix scalar product $\left(u,Mv\right)$.

      +

      Compute the matrix scalar product $\left(u,Mv\right)$.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
      @@ -2584,7 +2584,7 @@

      Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

      This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

      -

      By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

      +

      By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

      When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

      As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

      @@ -2664,8 +2664,8 @@
      -

      Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

      +

      Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

      @@ -2693,8 +2693,8 @@
      -

      Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrix.html differs (JavaScript source, ASCII text, with very long lines (679)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrix.html 2023-11-25 15:25:58.693247785 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrix.html 2023-11-25 15:25:58.693247785 +0100 @@ -1561,7 +1561,7 @@
      -

      Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

      +

      Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

      This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by SparsityPattern::symmetrize().

      @@ -1997,7 +1997,7 @@
      -

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      +

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

      The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
      @@ -2031,7 +2031,7 @@
      -

      Compute the matrix scalar product $\left(u,Mv\right)$.

      +

      Compute the matrix scalar product $\left(u,Mv\right)$.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
      @@ -2110,7 +2110,7 @@

      Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

      This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

      -

      By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

      +

      By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

      When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

      As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

      @@ -2174,8 +2174,8 @@
      -

      Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

      +

      Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin- Hoffmann: Numerische Mathematik)

      @@ -2195,8 +2195,8 @@
      -

      Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrixEZ.html differs (JavaScript source, ASCII text, with very long lines (792)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrixEZ.html 2023-11-25 15:25:58.716580645 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrixEZ.html 2023-11-25 15:25:58.716580645 +0100 @@ -1345,7 +1345,7 @@
      -

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      +

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      @@ -1376,7 +1376,7 @@
      -

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult but takes the transposed matrix.

      +

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult but takes the transposed matrix.

      @@ -1407,7 +1407,7 @@
      -

      Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

      +

      Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

      @@ -1438,7 +1438,7 @@
      -

      Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add but takes the transposed matrix.

      +

      Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add but takes the transposed matrix.

      @@ -1575,7 +1575,7 @@
      -

      Apply SOR preconditioning matrix to src. The result of this method is $dst = (om D - L)^{-1} src$.

      +

      Apply SOR preconditioning matrix to src. The result of this method is $dst = (om D - L)^{-1} src$.

      @@ -1612,7 +1612,7 @@
      -

      Apply transpose SOR preconditioning matrix to src. The result of this method is $dst = (om D - U)^{-1} src$.

      +

      Apply transpose SOR preconditioning matrix to src. The result of this method is $dst = (om D - U)^{-1} src$.

      @@ -1657,7 +1657,7 @@
      -

      Add the matrix A conjugated by B, that is, $B A B^T$ to this object. If the parameter transpose is true, compute $B^T A B$.

      +

      Add the matrix A conjugated by B, that is, $B A B^T$ to this object. If the parameter transpose is true, compute $B^T A B$.

      This function requires that B has a const_iterator traversing all matrix entries and that A has a function el(i,j) for access to a specific entry.

      Definition at line 1461 of file sparse_matrix_ez.h.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html differs (JavaScript source, ASCII text, with very long lines (1282)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 2023-11-25 15:25:58.733246970 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 2023-11-25 15:25:58.733246970 +0100 @@ -141,7 +141,7 @@

      The typical use for these iterators is to iterate over the elements of a sparse matrix or over the elements of individual rows. Note that there is no guarantee that the elements of a row are actually traversed in an order in which columns monotonically increase. See the documentation of the SparsityPattern class for more information.

      The first template argument denotes the underlying numeric type, the second the constness of the matrix.

      Since there is a specialization of this class for Constness=false, this class is for iterators to constant matrices.

      -
      Note
      This class operates directly on the internal data structures of the SparsityPattern and SparseMatrix classes. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index and the value of an entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ row). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparse matrix at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices and values whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.
      +
      Note
      This class operates directly on the internal data structures of the SparsityPattern and SparseMatrix classes. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index and the value of an entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ row). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparse matrix at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices and values whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.

      Definition at line 347 of file sparse_matrix.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparsityPattern.html differs (JavaScript source, ASCII text, with very long lines (1189)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparsityPattern.html 2023-11-25 15:25:58.756579831 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparsityPattern.html 2023-11-25 15:25:58.756579831 +0100 @@ -1282,7 +1282,7 @@
      -

      Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$, a diagonal matrix has bandwidth 0, and there are at most $2*q+1$ entries per row if the bandwidth is $q$. The returned quantity is sometimes called "half +

      Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$, a diagonal matrix has bandwidth 0, and there are at most $2*q+1$ entries per row if the bandwidth is $q$. The returned quantity is sometimes called "half bandwidth" in the literature.

      Definition at line 674 of file sparsity_pattern.cc.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html differs (JavaScript source, ASCII text, with very long lines (1211)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2023-11-25 15:25:58.769912893 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2023-11-25 15:25:58.769912893 +0100 @@ -158,7 +158,7 @@

      Detailed Description

      An iterator class for walking over the elements of a sparsity pattern.

      The typical use for these iterators is to iterate over the elements of a sparsity pattern (or, since they also serve as the basis for iterating over the elements of an associated matrix, over the elements of a sparse matrix), or over the elements of individual rows. There is no guarantee that the elements of a row are actually traversed in an order in which column numbers monotonically increase. See the documentation of the SparsityPattern class for more information.

      -
      Note
      This class operates directly on the internal data structures of the SparsityPattern class. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index of the sparsity pattern entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ row). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparsity pattern at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.
      +
      Note
      This class operates directly on the internal data structures of the SparsityPattern class. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index of the sparsity pattern entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ row). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparsity pattern at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.

      Definition at line 280 of file sparsity_pattern.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSphericalManifold.html differs (JavaScript source, ASCII text, with very long lines (1341)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSphericalManifold.html 2023-11-25 15:25:58.789912484 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSphericalManifold.html 2023-11-25 15:25:58.789912484 +0100 @@ -217,20 +217,20 @@ class SphericalManifold< dim, spacedim >

      Manifold description for a spherical space coordinate system.

      You can use this Manifold object to describe any sphere, circle, hypersphere or hyperdisc in two or three dimensions. This manifold can be used as a co-dimension one manifold descriptor of a spherical surface embedded in a higher dimensional space, or as a co-dimension zero manifold descriptor for a body with positive volume, provided that the center of the spherical space is excluded from the domain. An example for the use of this function would be in the description of a hyper-shell or hyper-ball geometry, for example after creating a coarse mesh using GridGenerator::hyper_ball(). (However, it is worth mentioning that generating a good mesh for a disk or ball is complicated and requires addition steps. See the "Possibilities for extensions" section of step-6 for an extensive discussion of how one would construct such meshes and what one needs to do for it.)

      The two template arguments match the meaning of the two template arguments in Triangulation<dim, spacedim>, however this Manifold can be used to describe both thin and thick objects, and the behavior is identical when dim <= spacedim, i.e., the functionality of SphericalManifold<2,3> is identical to SphericalManifold<3,3>.

      -

      While PolarManifold reflects the usual notion of polar coordinates, it may not be suitable for domains that contain either the north or south poles. Consider for instance the pair of points $x_1=(1,\pi/3,0)$ and $x_2=(1,\pi/3,\pi)$ in polar coordinates (lying on the surface of a sphere with radius one, on a parallel at height $\pi/3$). In this case connecting the points with a straight line in polar coordinates would take the long road around the globe, without passing through the north pole.

      +

      While PolarManifold reflects the usual notion of polar coordinates, it may not be suitable for domains that contain either the north or south poles. Consider for instance the pair of points $x_1=(1,\pi/3,0)$ and $x_2=(1,\pi/3,\pi)$ in polar coordinates (lying on the surface of a sphere with radius one, on a parallel at height $\pi/3$). In this case connecting the points with a straight line in polar coordinates would take the long road around the globe, without passing through the north pole.

      These two points would be connected (using a PolarManifold) by the curve

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   s: [0,1]  & \rightarrow &  \mathbb S^3 \\
           t & \mapsto     &  (1,\pi/3,0) + (0,0,t\pi)
-\end{align*} +\end{align*}" src="form_1449.png"/>

      This curve is not a geodesic on the sphere, and it is not how we would connect those two points. A better curve, would be the one passing through the North pole:

      -\[
+<picture><source srcset=\[
  s(t) = x_1 \cos(\alpha(t)) + \kappa \times x_1 \sin(\alpha(t)) +
  \kappa ( \kappa \cdot x_1) (1-\cos(\alpha(t))).
-\] +\]" src="form_1450.png"/>

      -

      where $\kappa = \frac{x_1 \times x_2}{\Vert x_1 \times x_2 \Vert}$ and $\alpha(t) = t \cdot \arccos(x_1 \cdot x_2)$ for $t\in[0,1]$. Indeed, this is a geodesic, and it is the natural choice when connecting points on the surface of the sphere. In the examples above, the PolarManifold class implements the first way of connecting two points on the surface of a sphere, while SphericalManifold implements the second way, i.e., this Manifold connects points using geodesics. If more than two points are involved through a SphericalManifold::get_new_points() call, a so-called spherical average is used where the final point minimizes the weighted distance to all other points via geodesics.

      +

      where $\kappa = \frac{x_1 \times x_2}{\Vert x_1 \times x_2 \Vert}$ and $\alpha(t) = t \cdot \arccos(x_1 \cdot x_2)$ for $t\in[0,1]$. Indeed, this is a geodesic, and it is the natural choice when connecting points on the surface of the sphere. In the examples above, the PolarManifold class implements the first way of connecting two points on the surface of a sphere, while SphericalManifold implements the second way, i.e., this Manifold connects points using geodesics. If more than two points are involved through a SphericalManifold::get_new_points() call, a so-called spherical average is used where the final point minimizes the weighted distance to all other points via geodesics.

      In particular, this class implements a Manifold that joins any two points in space by first projecting them onto the surface of a sphere with unit radius, then connecting them with a geodesic, and finally rescaling the final radius so that the resulting one is the weighted average of the starting radii. This Manifold is identical to PolarManifold in dimension two, while for dimension three it returns points that are more uniformly distributed on the sphere, and it is invariant with respect to rotations of the coordinate system, therefore avoiding the problems that PolarManifold has at the poles. Notice, in particular, that computing tangent vectors at the poles with a PolarManifold is not well defined, while it is perfectly fine with this class.

      For mathematical reasons, it is impossible to construct a unique map of a sphere using only geodesic curves, and therefore, using this class with MappingManifold is discouraged. If you use this Manifold to describe the geometry of a sphere, you should use MappingQ as the underlying mapping, and not MappingManifold.

      This Manifold can be used only on geometries where a ball with finite radius is removed from the center. Indeed, the center is a singular point for this manifold, and if you try to connect two points across the center, they would travel on spherical coordinates, avoiding the center.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSymmetricTensor.html differs (JavaScript source, ASCII text, with very long lines (743)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSymmetricTensor.html 2023-11-25 15:25:58.823245141 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classSymmetricTensor.html 2023-11-25 15:25:58.823245141 +0100 @@ -299,7 +299,7 @@ std::ostream &&#href_anchor"memTemplItemRight" valign="bottom">operator<< (std::ostream &out, const SymmetricTensor< 4, dim, Number > &t) &#href_anchor"details" id="details">

      Detailed Description

      template<int rank_, int dim, typename Number>
      -class SymmetricTensor< rank_, dim, Number >

      Provide a class that stores symmetric tensors of rank 2,4,... efficiently, i.e. only store those off-diagonal elements of the full tensor that are not redundant. For example, for symmetric $2\times 2$ tensors, this would be the elements 11, 22, and 12, while the element 21 is equal to the 12 element. Within this documentation, second order symmetric tensors are denoted as bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters such as $\boldsymbol{\varepsilon}$, $\boldsymbol{\sigma}$. The Cartesian coordinates of a second-order tensor such as $\mathbf A$ are represented as $A_{ij}$ where $i,j$ are indices ranging from 0 to dim-1.

      +class SymmetricTensor< rank_, dim, Number >

      Provide a class that stores symmetric tensors of rank 2,4,... efficiently, i.e. only store those off-diagonal elements of the full tensor that are not redundant. For example, for symmetric $2\times 2$ tensors, this would be the elements 11, 22, and 12, while the element 21 is equal to the 12 element. Within this documentation, second order symmetric tensors are denoted as bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters such as $\boldsymbol{\varepsilon}$, $\boldsymbol{\sigma}$. The Cartesian coordinates of a second-order tensor such as $\mathbf A$ are represented as $A_{ij}$ where $i,j$ are indices ranging from 0 to dim-1.

      Using this class for symmetric tensors of rank 2 has advantages over matrices in many cases since the dimension is known to the compiler as well as the location of the data. It is therefore possible to produce far more efficient code than for matrices with runtime-dependent dimension. It is also more efficient than using the more general Tensor class, since fewer elements are stored, and the class automatically makes sure that the tensor represents a symmetric object.

      For tensors of higher rank, the savings in storage are even higher. For example for the $3 \times 3 \times 3 \times 3$ tensors of rank 4, only 36 instead of the full 81 entries have to be stored. These rank 4 tensors are denoted by blackboard-style upper-case Latin letters such as $\mathbb A$ with components $\mathcal{A}_{ijkl}$.

      While the definition of a symmetric rank-2 tensor is obvious, tensors of rank 4 are considered symmetric if they are operators mapping symmetric rank-2 tensors onto symmetric rank-2 tensors. This so-called minor symmetry of the rank 4 tensor requires that for every set of four indices $i, j, k, l$, the identity $\mathcal{C}_{ijkl} = \mathcal{C}_{jikl} =
@@ -625,7 +625,7 @@
   </tr>
 </table>
 </div><div class= -

      This operator assigns a scalar to a tensor. To avoid confusion with what exactly it means to assign a scalar value to a tensor, zero is the only value allowed for d, allowing the intuitive notation $\mathbf A = 0$ to reset all elements of the tensor to zero.

      +

      This operator assigns a scalar to a tensor. To avoid confusion with what exactly it means to assign a scalar value to a tensor, zero is the only value allowed for d, allowing the intuitive notation $\mathbf A = 0$ to reset all elements of the tensor to zero.

      @@ -887,8 +887,8 @@
      -

      Double contraction product between the present symmetric tensor and a tensor of rank 2. For example, if the present object is the symmetric rank-2 tensor $\mathbf{A}$ and it is multiplied by another symmetric rank-2 tensor $\mathbf{B}$, then the result is the scalar-product double contraction $\mathbf A : \mathbf B = \sum_{i,j} A_{ij} B_{ij}$. In this case, the return value evaluates to a single scalar. While it is possible to define other scalar products (and associated induced norms), this one seems to be the most appropriate one.

      -

      If the present object is a rank-4 tensor such as $\mathbb A$, then the result is a rank-2 tensor $\mathbf C = \mathbb A : \mathbf B$, i.e., the operation contracts over the last two indices of the present object and the indices of the argument, and the result is a tensor of rank 2 ( $C_{ij} = \sum_{k,l} \mathcal{A}_{ijkl} B_{kl}$).

      +

      Double contraction product between the present symmetric tensor and a tensor of rank 2. For example, if the present object is the symmetric rank-2 tensor $\mathbf{A}$ and it is multiplied by another symmetric rank-2 tensor $\mathbf{B}$, then the result is the scalar-product double contraction $\mathbf A : \mathbf B = \sum_{i,j} A_{ij} B_{ij}$. In this case, the return value evaluates to a single scalar. While it is possible to define other scalar products (and associated induced norms), this one seems to be the most appropriate one.

      +

      If the present object is a rank-4 tensor such as $\mathbb A$, then the result is a rank-2 tensor $\mathbf C = \mathbb A : \mathbf B$, i.e., the operation contracts over the last two indices of the present object and the indices of the argument, and the result is a tensor of rank 2 ( $C_{ij} = \sum_{k,l} \mathcal{A}_{ijkl} B_{kl}$).

      Note that the multiplication operator for symmetric tensors is defined to be a double contraction over two indices, while it is defined as a single contraction over only one index for regular Tensor objects. For symmetric tensors it therefore acts in a way that is commonly denoted by a "colon multiplication" in the mathematical literature (the two dots of the colon suggesting that it is a contraction over two indices), which corresponds to a scalar product between tensors.

      It is worth pointing out that this definition of operator* between symmetric tensors is different to how the (in general non-symmetric) Tensor class defines operator*, namely as the single-contraction product over the last index of the first operand and the first index of the second operand. For the double contraction of Tensor objects, you will need to use the double_contract() function.

      To maintain at least a modicum of resemblance between the interfaces of Tensor and SymmetricTensor, there are also global functions double_contract() for symmetric tensors that then do the same work as this operator. However, rather than returning the result as a return value, they write it into the first argument to the function in the same way as the corresponding functions for the Tensor class do things.

      @@ -1232,7 +1232,7 @@
      -

      The opposite of the previous function: given an index $i$ in the unrolled form of the tensor, return what set of indices $(k,l)$ (for rank-2 tensors) or $(k,l,m,n)$ (for rank-4 tensors) corresponds to it.

      +

      The opposite of the previous function: given an index $i$ in the unrolled form of the tensor, return what set of indices $(k,l)$ (for rank-2 tensors) or $(k,l,m,n)$ (for rank-4 tensors) corresponds to it.

      @@ -1470,7 +1470,7 @@
      -

      Return the fourth-order symmetric identity tensor $\mathbb S$ which maps symmetric second-order tensors, such as $\mathbf A$, to themselves.

      +

      Return the fourth-order symmetric identity tensor $\mathbb S$ which maps symmetric second-order tensors, such as $\mathbf A$, to themselves.

      \[
   \mathbb S : \mathbf A = \mathbf A
 \] @@ -1924,7 +1924,7 @@

      -

      Compute the second invariant of a tensor of rank 2. The second invariant of a tensor $\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12
+<p>Compute the second invariant of a tensor of rank 2. The second invariant of a tensor <picture><source srcset=$\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12
 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

      For the kind of arguments to this function, i.e., a rank-2 tensor of size 1, the result is simply zero.

      @@ -1956,11 +1956,11 @@
      -

      Compute the second invariant of a tensor of rank 2. The second invariant of a tensor $\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12
+<p>Compute the second invariant of a tensor of rank 2. The second invariant of a tensor <picture><source srcset=$\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12
 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

      For the kind of arguments to this function, i.e., a symmetric rank-2 tensor of size 2, the result is (counting indices starting at one) $I_2(\mathbf A) = II(\mathbf A) = \frac 12
   \left[ (A_{11} + A_{22})^2 - (A_{11}^2+2 A_{12}^2+ A_{22}^2) \right]
-  = A_{11} A_{22} - A_{12}^2$. As expected, for the $2\times 2$ symmetric tensors this function handles, this equals the determinant of the tensor. (This is so because for $2\times 2$ symmetric tensors, there really are only two invariants, so the second and third invariant are the same; the determinant is the third invariant.)

      + = A_{11} A_{22} - A_{12}^2$" src="form_810.png"/>. As expected, for the $2\times 2$ symmetric tensors this function handles, this equals the determinant of the tensor. (This is so because for $2\times 2$ symmetric tensors, there really are only two invariants, so the second and third invariant are the same; the determinant is the third invariant.)

      Definition at line 2917 of file symmetric_tensor.h.

      @@ -1990,7 +1990,7 @@
      -

      Compute the second invariant of a tensor of rank 2. The second invariant of a tensor $\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12
+<p>Compute the second invariant of a tensor of rank 2. The second invariant of a tensor <picture><source srcset=$\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12
 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

      Definition at line 2934 of file symmetric_tensor.h.

      @@ -2049,8 +2049,8 @@
      -

      Return the eigenvalues of a symmetric $2\times 2$ tensor. The array of eigenvalues is sorted in descending order.

      -

      For $2\times 2$ tensors, the eigenvalues of tensor $\mathbf T$ are the roots of the characteristic polynomial $0 = \lambda^2
+<p>Return the eigenvalues of a symmetric <picture><source srcset=$2\times 2$ tensor. The array of eigenvalues is sorted in descending order.

      +

      For $2\times 2$ tensors, the eigenvalues of tensor $\mathbf T$ are the roots of the characteristic polynomial $0 = \lambda^2
 - \lambda\;\text{tr}\mathbf{T} + \det \mathbf{T}$ as given by $\lambda_1, \lambda_2 = \frac{1}{2} \left[ \text{tr} \mathbf{T} \pm
 \sqrt{(\text{tr} \mathbf{T})^2 - 4 \det \mathbf{T}} \right]$.

      Warning
      The algorithm employed here determines the eigenvalues by computing the roots of the characteristic polynomial. In the case that there exists a common root (the eigenvalues are equal), the computation is subject to round-off errors of order $\sqrt{\epsilon}$. As an alternative, the eigenvectors() function provides a more robust, but costly, method to compute the eigenvalues of a symmetric tensor.
      @@ -2662,7 +2662,7 @@
      -

      Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. In the current case where both arguments are symmetric tensors, this is equivalent to calling the expression A*B which uses SymmetricTensor::operator*().

      +

      Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. In the current case where both arguments are symmetric tensors, this is equivalent to calling the expression A*B which uses SymmetricTensor::operator*().

      Definition at line 3737 of file symmetric_tensor.h.

      @@ -2701,7 +2701,7 @@
      -

      Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if B is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

      +

      Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if B is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

      Definition at line 3759 of file symmetric_tensor.h.

      @@ -2740,7 +2740,7 @@
      -

      Compute the scalar product $\mathbf A:\mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if A is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

      +

      Compute the scalar product $\mathbf A:\mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if A is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

      Definition at line 3786 of file symmetric_tensor.h.

      @@ -3127,13 +3127,13 @@
      -

      The dot product (single contraction) for tensors: Return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

      -\[
+<p>The dot product (single contraction) for tensors: Return a tensor of rank <picture><source srcset=$(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

      +\[
   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
   = \sum_{k}
     \text{left}_{i_1,\ldots,i_{r1}, k}
     \text{right}_{k, j_1,\ldots,j_{r2}}
-\] +\]" src="form_827.png"/>

      Note
      As one operand is a Tensor, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, which does the double contraction.
      @@ -3174,13 +3174,13 @@
      -

      The dot product (single contraction) for tensors: Return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

      -\[
+<p>The dot product (single contraction) for tensors: Return a tensor of rank <picture><source srcset=$(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

      +\[
   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
   = \sum_{k}
     \text{left}_{i_1,\ldots,i_{r1}, k}
     \text{right}_{k, j_1,\ldots,j_{r2}}
-\] +\]" src="form_827.png"/>

      Note
      As one operand is a Tensor, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, which does the double contraction.
      @@ -3346,7 +3346,7 @@
      n_independent_components
      -

      An integer denoting the number of independent components that fully describe a symmetric tensor. In $d$ space dimensions, this number equals $\frac 12 (d^2+d)$ for symmetric tensors of rank 2.

      +

      An integer denoting the number of independent components that fully describe a symmetric tensor. In $d$ space dimensions, this number equals $\frac 12 (d^2+d)$ for symmetric tensors of rank 2.

      Definition at line 743 of file symmetric_tensor.h.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTableBase.html differs (JavaScript source, ASCII text, with very long lines (933)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTableBase.html 2023-11-25 15:25:58.843244734 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/classTableBase.html 2023-11-25 15:25:58.843244734 +0100 @@ -231,7 +231,7 @@

      In some way, this class is similar to the Tensor class, in that it templatizes on the number of dimensions. However, there are two major differences. The first is that the Tensor class stores only numeric values (as doubles), while the Table class stores arbitrary objects. The second is that the Tensor class has fixed sizes in each dimension, also given as a template argument, while this class can handle arbitrary and different sizes in each dimension.

      This has two consequences. First, since the size is not known at compile time, it has to do explicit memory allocation. Second, the layout of individual elements is not known at compile time, so access is slower than for the Tensor class where the number of elements are their location is known at compile time and the compiler can optimize with this knowledge (for example when unrolling loops). On the other hand, this class is of course more flexible, for example when you want a two-dimensional table with the number of rows equal to the number of degrees of freedom on a cell, and the number of columns equal to the number of quadrature points. Both numbers may only be known at run-time, so a flexible table is needed here. Furthermore, you may want to store, say, the gradients of shape functions, so the data type is not a single scalar value, but a tensor itself.

      Dealing with large data sets

      -

      The Table classes (derived from this class) are frequently used to store large data tables. A modest example is given in step-53 where we store a $380 \times 220$ table of geographic elevation data for a region of Africa, and this data requires about 670 kB if memory; however, tables that store three- or more-dimensional data (say, information about the density, pressure, and temperature in the earth interior on a regular grid of (latitude, longitude, depth) points) can easily run into hundreds of megabytes or more. These tables are then often provided to classes such as InterpolatedTensorProductGridData or InterpolatedUniformGridData.

      +

      The Table classes (derived from this class) are frequently used to store large data tables. A modest example is given in step-53 where we store a $380 \times 220$ table of geographic elevation data for a region of Africa, and this data requires about 670 kB if memory; however, tables that store three- or more-dimensional data (say, information about the density, pressure, and temperature in the earth interior on a regular grid of (latitude, longitude, depth) points) can easily run into hundreds of megabytes or more. These tables are then often provided to classes such as InterpolatedTensorProductGridData or InterpolatedUniformGridData.

      If you need to load such tables on single-processor (or multi-threaded) jobs, then there is nothing you can do about the size of these tables: The table just has to fit into memory. But, if your program is parallelized via MPI, then a typical first implementation would create a table object on every process and fill it on every MPI process by reading the data from a file. This is inefficient from two perspectives:

      In order to evaluate such an expression in an application code, we have to access three different kinds of objects: a quadrature object that describes locations $\hat{\bf x}_q$ and weights $w_q$ of quadrature points on the reference cell; a finite element object that describes the gradients $\hat\nabla \varphi_i(\hat{\bf x}_q)$ of shape functions on the unit cell; and a mapping object that provides the Jacobian as well as its determinant. Dealing with all these objects would be cumbersome and error prone.

      On the other hand, these three kinds of objects almost always appear together, and it is in fact very rare for deal.II application codes to do anything with quadrature, finite element, or mapping objects besides using them together. For this reason, deal.II uses the FEValues abstraction combining information on the shape functions, the geometry of the actual mesh cell and a quadrature rule on a reference cell. Upon construction it takes one object of each of the three mentioned categories. Later, it can be "re-initialized" for a concrete grid cell and then provides mapped quadrature points and weights, mapped shape function values and derivatives as well as some properties of the transformation from the reference cell to the actual mesh cell.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__auto__symb__diff.html differs (JavaScript source, ASCII text, with very long lines (4820)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__auto__symb__diff.html 2023-11-25 15:26:00.049886843 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__auto__symb__diff.html 2023-11-25 15:26:00.049886843 +0100 @@ -101,7 +101,7 @@ &#href_anchor"memitem:namespaceDifferentiation_1_1SD">namespace  Differentiation::SD &#href_anchor"details" id="details">

      Detailed Description

      A module dedicated to the implementation of functions and classes that relate to automatic and symbolic differentiation.

      -

      Below we provide a very brief introduction as to what automatic and symbolic differentiation are, what variations of these computational/numerical schemes exist, and how they are integrated within deal.II's framework. The purpose of all of these schemes is to automatically compute the derivative of functions, or approximations of it, in cases where one does not want to compute them by hand. Common examples are situations in the finite element context is where one wants to solve a nonlinear problem that is given by requiring that some residual $F(u,\nabla u)=0$ where $F$ is a complicated function that needs to be differentiated to apply Newton's method; and situations where one is given a parameter dependent problem ${\cal A}(q,u,\nabla u) = f$ and wants to form derivatives with regards to the parameters $q$, for example to optimize an output functional with regards to $q$, or for a sensitivity analysis with regards to $q$. One should think of $q$ as design parameters: say, the width or shape of a wing, the stiffness coefficients of a material chosen to build an object, the power sent to a device, the chemical composition of the gases sent to a burner. In all of these cases, one should think of $F$ and $\cal A$ as complicated and cumbersome to differentiate – at least when doing it by hand. A relatively simple case of a nonlinear problem that already highlights the tedium of computing derivatives by hand is shown in step-15. However, in reality, one might, for example, think about problems such as chemically reactive flows where the fluid equations have coefficients such as the density and viscosity that depend strongly and nonlinearly on the chemical composition, temperature, and pressure of the fluid at each point; and where the chemical species react with each other based on reaction coefficients that also depend nonlinearly and in complicated ways on the chemical composition, temperature, and pressure. 
In many cases, the exact formulas for all of these coefficients can take several lines to write out, may include exponentials and (harmonic or geometric) averages of several nonlinear terms, and/or may contain table lookup of and interpolation between data points. Just getting these terms right is difficult enough; computing derivatives of these terms is impractical in most applications and, in reality, impossible to get right. Higher derivatives are even more impossible to do without computer aid. Automatic or symbolic differentiation is a way out of this: One only has to implement the function that computes these coefficients in terms of their inputs only once, and gets the (correct!) derivatives without further coding effort (though at a non-negligible computational cost either at run time, compile time, or both).

      +

      Below we provide a very brief introduction as to what automatic and symbolic differentiation are, what variations of these computational/numerical schemes exist, and how they are integrated within deal.II's framework. The purpose of all of these schemes is to automatically compute the derivative of functions, or approximations of it, in cases where one does not want to compute them by hand. Common examples are situations in the finite element context is where one wants to solve a nonlinear problem that is given by requiring that some residual $F(u,\nabla u)=0$ where $F$ is a complicated function that needs to be differentiated to apply Newton's method; and situations where one is given a parameter dependent problem ${\cal A}(q,u,\nabla u) = f$ and wants to form derivatives with regards to the parameters $q$, for example to optimize an output functional with regards to $q$, or for a sensitivity analysis with regards to $q$. One should think of $q$ as design parameters: say, the width or shape of a wing, the stiffness coefficients of a material chosen to build an object, the power sent to a device, the chemical composition of the gases sent to a burner. In all of these cases, one should think of $F$ and $\cal A$ as complicated and cumbersome to differentiate – at least when doing it by hand. A relatively simple case of a nonlinear problem that already highlights the tedium of computing derivatives by hand is shown in step-15. However, in reality, one might, for example, think about problems such as chemically reactive flows where the fluid equations have coefficients such as the density and viscosity that depend strongly and nonlinearly on the chemical composition, temperature, and pressure of the fluid at each point; and where the chemical species react with each other based on reaction coefficients that also depend nonlinearly and in complicated ways on the chemical composition, temperature, and pressure. 
In many cases, the exact formulas for all of these coefficients can take several lines to write out, may include exponentials and (harmonic or geometric) averages of several nonlinear terms, and/or may contain table lookup of and interpolation between data points. Just getting these terms right is difficult enough; computing derivatives of these terms is impractical in most applications and, in reality, impossible to get right. Higher derivatives are even more impossible to do without computer aid. Automatic or symbolic differentiation is a way out of this: One only has to implement the function that computes these coefficients in terms of their inputs only once, and gets the (correct!) derivatives without further coding effort (though at a non-negligible computational cost either at run time, compile time, or both).

      Automatic differentiation

      Automatic differentiation (commonly also referred to as algorithmic differentiation), is a numerical method that can be used to "automatically" compute the first, and perhaps higher-order, derivatives of function(s) with respect to one or more input variables. Although this comes at a certain computational cost, the benefits to using such a tool may be significant. When used correctly the derivatives of often complicated functions can be computed to a very high accuracy. Although the exact accuracy achievable by these frameworks largely depends on their underlying mathematical formulation, some implementations compute with a precision on the order of machine accuracy. Note that this is different to classical numerical differentiation (using, for example, a finite difference approximation of a function by evaluating it at different points), which has an accuracy that depends on both the perturbation size as well as the chosen finite-difference scheme; the error of these methods is measurably larger than well-formulated automatic differentiation approaches.

      @@ -149,38 +149,38 @@
    13. reverse-mode (or reverse accumulation) auto-differentiation.
    14. As a point of interest, the optimal Jacobian accumulation, which performs a minimal set of computations, lies somewhere between these two limiting cases. Its computation for a general composite function remains an open problem in graph theory.

      -

      With the aid of the diagram below (it and some of the listed details courtesy of this Wikipedia article), let us think about the representation of the calculation of the function $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ and its derivatives:

      -
      Forward mode automatic differentiation
      Forward mode automatic differentiation
      Reverse mode automatic differentiation
      Reverse mode automatic differentiation

      Specifically, we will briefly describe what forward and reverse auto-differentiation are. Note that in the diagram, along the edges of the graph in text are the directional derivative of function $w$ with respect to the $i$-th variable, represented by the notation $\dot{w} = \dfrac{d w}{d x_{i}}$. The specific computations used to render the function value and its directional derivatives for this example are tabulated in the source article. For a second illustrative example, we refer the interested reader to this article.

      -

      Consider first that any composite function $f(x)$, here represented as having two independent variables, can be dissected into a composition of its elementary functions

      -\[
+<p>With the aid of the diagram below (it and some of the listed details courtesy of this <a href=Wikipedia article), let us think about the representation of the calculation of the function $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ and its derivatives:

      +
      Forward mode automatic differentiation
      Forward mode automatic differentiation
      Reverse mode automatic differentiation
      Reverse mode automatic differentiation

      Specifically, we will briefly describe what forward and reverse auto-differentiation are. Note that in the diagram, along the edges of the graph in text are the directional derivative of function $w$ with respect to the $i$-th variable, represented by the notation $\dot{w} = \dfrac{d w}{d x_{i}}$. The specific computations used to render the function value and its directional derivatives for this example are tabulated in the source article. For a second illustrative example, we refer the interested reader to this article.

      +

      Consider first that any composite function $f(x)$, here represented as having two independent variables, can be dissected into a composition of its elementary functions

      +\[
   f (\mathbf{x})
   = f_{0} \circ f_{1} \circ f_{2} \circ \ldots \circ f_{n} (\mathbf{x})
   \quad .
-\] +\]" src="form_10.png"/>

      -

      As was previously mentioned, if each of the primitive operations $f_{n}$ is smooth and differentiable, then the chain-rule can be universally employed to compute the total derivative of $f$, namely $\dfrac{d f(x)}{d \mathbf{x}}$. What distinguishes the "forward" from the "reverse" mode is how the chain-rule is evaluated, but ultimately both compute the total derivative

      -\[
+<p> As was previously mentioned, if each of the primitive operations <picture><source srcset=$f_{n}$ is smooth and differentiable, then the chain-rule can be universally employed to compute the total derivative of $f$, namely $\dfrac{d f(x)}{d \mathbf{x}}$. What distinguishes the "forward" from the "reverse" mode is how the chain-rule is evaluated, but ultimately both compute the total derivative

      +\[
   \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \dfrac{d f_{0}}{d f_{1}} \dfrac{d f_{1}}{d f_{2}} \dfrac{d f_{2}}{d f_{3}} \ldots \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}}
   \quad .
-\] +\]" src="form_14.png"/>

      -

      In forward-mode, the chain-rule is computed naturally from the "inside out". The independent variables are therefore fixed, and each sub-function $f'_{i} \vert_{f'_{i+1}}$ is computed recursively and its result returned as inputs to the parent function. Encapsulating and fixing the order of operations using parentheses, this means that we compute

      -\[
+<p>In forward-mode, the chain-rule is computed naturally from the $f'_{i} \vert_{f'_{i+1}}$ is computed recursively and its result returned as inputs to the parent function. Encapsulating and fixing the order of operations using parentheses, this means that we compute

      +\[
   \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \dfrac{d f_{0}}{d f_{1}} \left( \dfrac{d f_{1}}{d f_{2}} \left(\dfrac{d f_{2}}{d f_{3}} \left(\ldots \left( \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}} \right)\right)\right)\right)
   \quad .
-\] +\]" src="form_16.png"/>

      The computational complexity of a forward-sweep is proportional to that of the input function. However, for each directional derivative that is to be computed one sweep of the computational graph is required.

      In reverse-mode, the chain-rule is computed somewhat unnaturally from the "outside in". The values of the dependent variables first get computed and fixed, and then the preceding differential operations are evaluated and multiplied in succession with the previous results from left to right. Again, if we encapsulate and fix the order of operations using parentheses, this implies that the reverse calculation is performed by

      -\[
+<picture><source srcset=\[
 \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \left( \left( \left( \left( \left( \dfrac{d f_{0}}{d f_{1}} \right) \dfrac{d f_{1}}{d f_{2}} \right) \dfrac{d f_{2}}{d f_{3}} \right) \ldots \right) \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}} \right)
   \quad .
-\] +\]" src="form_17.png"/>

      -

      The intermediate values $\dfrac{d f_{i-1}}{d f_{i}}$ are known as adjoints, which must be computed and stored as the computational graph is traversed. However, for each dependent scalar function one sweep of the computational graph renders all directional derivatives at once.

      +

      The intermediate values $\dfrac{d f_{i-1}}{d f_{i}}$ are known as adjoints, which must be computed and stored as the computational graph is traversed. However, for each dependent scalar function one sweep of the computational graph renders all directional derivatives at once.

      Overall, the efficiency of each mode is determined by the number of independent (input) variables and dependent (output) variables. If the outputs greatly exceed the inputs in number, then forward-mode can be shown to be more efficient than reverse-mode. The converse is true when the number of input variables greatly exceeds that of the output variables. This point may be used to help inform which number type is most suitable for which set of operations are to be performed using automatic differentiation. For example, in many applications for which second derivatives are to be computed it is appropriate to combine both reverse- and forward-modes. The former would then typically be used to calculate the first derivatives, and the latter the second derivatives.

      Supported automatic differentiation libraries

      @@ -328,7 +328,7 @@

      Symbolic expressions and differentiation

      Symbolic differentiation is, in terms of its design and usage, quite different to automatic differentiation. Underlying any symbolic library is a computer algebra system (CAS) that implements a language and collection of algorithms to manipulate symbolic (or "string-like") expressions. This is most similar, from a philosophical point of view, to how algebraic operations would be performed by hand.

      -

      To help better distinguish between symbolic differentiation and numerical methods like automatic differentiation, let's consider a very simple example. Suppose that the function $f(x,y) = [2x+1]^{y}$, where $x$ and $y$ are variables that are independent of one another. By applying the chain-rule, the derivatives of this function are simply $\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ and $\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$. These are exactly the results that you get from a CAS after defining the symbolic variables x and y, defining the symbolic expression f = pow(2x+1, y) and computing the derivatives diff(f, x) and diff(f, y). At this point there is no assumption of what x and y represent; they may later be interpreted as plain (scalar) numbers, complex numbers, or something else for which the power and natural logarithm functions are well defined. Obviously this means that there is also no assumption about which point to evaluate either the expression or its derivatives. One could readily take the expression for $\dfrac{d f(x, y)}{d x}$ and evaluate it at $x=1, y=2.5$ and then later, with no recomputation of the derivative expression itself, evaluate it at $x=3.25, y=-6$. In fact, the interpretation of any symbolic variable or expression, and the inter-dependencies between variables, may be defined or redefined at any point during their manipulation; this leads to a degree of flexibility in computations that cannot be matched by auto-differentiation. For example, one could perform the permanent substitution $g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ and then recompute $g(x)$ for several different values of $x$. One could also post-factum express an interdependency between x and y, such as $y \rightarrow y(x) := 2x$. 
For such a case, this means that the initially computed derivatives $\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ and $\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ truly represent partial derivatives rather than total derivatives. Of course, if such an inter-dependency was explicitly defined before the derivatives $\dfrac{d f(x, y(x))}{d x}$ and $\dfrac{d f(x, y(x))}{d y}$ are computed, then this could correspond to the total derivative (which is the only result that auto-differentiation is able to achieve for this example).

      +

      To help better distinguish between symbolic differentiation and numerical methods like automatic differentiation, let's consider a very simple example. Suppose that the function $f(x,y) = [2x+1]^{y}$, where $x$ and $y$ are variables that are independent of one another. By applying the chain-rule, the derivatives of this function are simply $\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ and $\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$. These are exactly the results that you get from a CAS after defining the symbolic variables x and y, defining the symbolic expression f = pow(2x+1, y) and computing the derivatives diff(f, x) and diff(f, y). At this point there is no assumption of what x and y represent; they may later be interpreted as plain (scalar) numbers, complex numbers, or something else for which the power and natural logarithm functions are well defined. Obviously this means that there is also no assumption about which point to evaluate either the expression or its derivatives. One could readily take the expression for $\dfrac{d f(x, y)}{d x}$ and evaluate it at $x=1, y=2.5$ and then later, with no recomputation of the derivative expression itself, evaluate it at $x=3.25, y=-6$. In fact, the interpretation of any symbolic variable or expression, and the inter-dependencies between variables, may be defined or redefined at any point during their manipulation; this leads to a degree of flexibility in computations that cannot be matched by auto-differentiation. For example, one could perform the permanent substitution $g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ and then recompute $g(x)$ for several different values of $x$. One could also post-factum express an interdependency between x and y, such as $y \rightarrow y(x) := 2x$. 
For such a case, this means that the initially computed derivatives $\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ and $\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ truly represent partial derivatives rather than total derivatives. Of course, if such an inter-dependency was explicitly defined before the derivatives $\dfrac{d f(x, y(x))}{d x}$ and $\dfrac{d f(x, y(x))}{d y}$ are computed, then this could correspond to the total derivative (which is the only result that auto-differentiation is able to achieve for this example).

      Due to the sophisticated CAS that forms the foundation of symbolic operations, the types of manipulations are not necessarily restricted to differentiation alone, but rather may span a spectrum of manipulations relevant to discrete differential calculus, topics in pure mathematics, and more. The documentation for the SymPy library gives plenty of examples that highlight what a fully-fledged CAS is capable of. Through the Differentiation::SD::Expression class, and the associated functions in the Differentiation::SD namespace, we provide a wrapper to the high-performance SymEngine symbolic manipulation library that has enriched operator overloading and a consistent interface that makes it easy and "natural" to use. In fact, this class can be used as a "drop-in" replacement for arithmetic types in many situations, transforming the operations from being numeric to symbolic in nature; this is made especially easy when classes are templated on the underlying number type. Being focused on numerical simulation of PDEs, the functionality of the CAS that is exposed within deal.II focuses on symbolic expression creation, manipulation, and differentiation.

      The convenience wrappers to SymEngine functionality are primarily focused on manipulations that solely involve dictionary-based (i.e., something reminiscent of "string-based") operations. Although SymEngine performs these operations in an efficient manner, they are still known to be computationally expensive, especially when the operations are performed on large expressions. It should therefore be expected that the performance of the parts of code that perform differentiation, symbolic substitution, etc., may be a limiting factor when using this in production code. deal.II therefore provides an interface to accelerate the evaluation of lengthy symbolic expressions through the BatchOptimizer class (itself often leveraging functionality provided by SymEngine). In particular, the BatchOptimizer simultaneously optimizes a collection of symbolic expressions using methods such as common subexpression elimination (CSE), as well as by generating high performance code-paths to evaluate these expressions through the use of a custom-generated std::function or by compiling the expression using the LLVM JIT compiler. The usage of the Differentiation::SD::BatchOptimizer class is exemplified in step-71.

      As a final note, it is important to recognize the remaining major deficiencies in deal.II's current implementation of the interface to the supported symbolic library. The level of functionality currently implemented effectively limits the use of symbolic algebra to the traditional use case (i.e. scalar and tensor algebra, as might be useful to define constitutive relations or complex functions for application as boundary conditions or source terms). In fact, step-71 demonstrates how it can be used to implement challenging constitutive models. In the future we will also implement classes to assist in performing assembly operations in the same spirit as that which has been done in the Differentiation::AD namespace.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__constraints.html differs (JavaScript source, ASCII text, with very long lines (1590)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__constraints.html 2023-11-25 15:26:00.083219500 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__constraints.html 2023-11-25 15:26:00.083219500 +0100 @@ -209,18 +209,18 @@
    15. If you have boundary conditions that set a certain part of the solution's value, for example no normal flux, $\mathbf n \cdot
   \mathbf u=0$ (as happens in flow problems and is handled by the VectorTools::compute_no_normal_flux_constraints function) or prescribed tangential components, $\mathbf{n}\times\mathbf{u}=
   \mathbf{n}\times\mathbf{f}$ (as happens in electromagnetic problems and is handled by the VectorTools::project_boundary_values_curl_conforming function). For the former case, imagine for example that we are at a vertex where the normal vector has the form $\frac 1{\sqrt{14}}
-  (1,2,3)^T$ and that the $x$-, $y$- and $z$-components of the flow field at this vertex are associated with degrees of freedom 12, 28, and 40. Then the no-normal-flux condition means that we need to have the condition $\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$. The prescribed tangential component leads to similar constraints though there is often something on the right hand side.
    16. + (1,2,3)^T$ and that the $x$-, $y$- and $z$-components of the flow field at this vertex are associated with degrees of freedom 12, 28, and 40. Then the no-normal-flux condition means that we need to have the condition $\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$. The prescribed tangential component leads to similar constraints though there is often something on the right hand side.
    17. If you have hanging node constraints, for example in a mesh like this:
      - Let's assume the bottom right one of the two red degrees of freedom is $x_{12}$ and that the two yellow neighbors on its left and right are $x_{28}$ and $x_{40}$. Then, requiring that the finite element function be continuous is equivalent to requiring that $x_{12}=
-  \frac 12 (x_{28}+x_{40})$. A similar situation occurs in the context of hp-adaptive finite element methods. For example, when using Q1 and Q2 elements (i.e. using FE_Q(1) and FE_Q(2)) on the two marked cells of the mesh
      + Let's assume the bottom right one of the two red degrees of freedom is $x_{12}$ and that the two yellow neighbors on its left and right are $x_{28}$ and $x_{40}$. Then, requiring that the finite element function be continuous is equivalent to requiring that $x_{12}=
+  \frac 12 (x_{28}+x_{40})$. A similar situation occurs in the context of hp-adaptive finite element methods. For example, when using Q1 and Q2 elements (i.e. using FE_Q(1) and FE_Q(2)) on the two marked cells of the mesh
      - there are three constraints: first $x_2=\frac 12 x_0 + \frac 12 x_1$, then $x_4=\frac 14 x_0 + \frac 34 x_1$, and finally the identity $x_3=x_1$. Similar constraints occur as hanging nodes even if all cells used the same finite elements. In all of these cases, you would use the DoFTools::make_hanging_node_constraints function to compute such constraints.
    18. + there are three constraints: first $x_2=\frac 12 x_0 + \frac 12 x_1$, then $x_4=\frac 14 x_0 + \frac 34 x_1$, and finally the identity $x_3=x_1$. Similar constraints occur as hanging nodes even if all cells used the same finite elements. In all of these cases, you would use the DoFTools::make_hanging_node_constraints function to compute such constraints.
    19. Other linear constraints, for example when you try to impose a certain average value for a problem that would otherwise not have a unique solution. An example of this is given in the step-11 tutorial program.
    20. -

      In all of these examples, constraints on degrees of freedom are linear and possibly inhomogeneous. In other words, they always have the form $x_{i_1} = \sum_{j=2}^M a_{i_j} x_{i_j} + b_i$. The deal.II class that deals with storing and using these constraints is AffineConstraints.

      +

      In all of these examples, constraints on degrees of freedom are linear and possibly inhomogeneous. In other words, they always have the form $x_{i_1} = \sum_{j=2}^M a_{i_j} x_{i_j} + b_i$. The deal.II class that deals with storing and using these constraints is AffineConstraints.

      Eliminating constraints

      When building the global system matrix and the right hand sides, one can build them without taking care of the constraints, i.e. by simply looping over cells and adding the local contributions to the global matrix and right hand side objects. In order to do actual calculations, you have to 'condense' the linear system: eliminate constrained degrees of freedom and distribute the appropriate values to the unconstrained dofs. This changes the sparsity pattern of the sparse matrices used in finite element calculations and is thus a quite expensive operation. The general scheme of things is then that you build your system, you eliminate (condense) away constrained nodes using the AffineConstraints::condense() functions, then you solve the remaining system, and finally you compute the values of constrained nodes from the values of the unconstrained ones using the AffineConstraints::distribute() function. Note that the AffineConstraints::condense() function is applied to matrix and right hand side of the linear system, while the AffineConstraints::distribute() function is applied to the solution vector.

      This scheme of first building a linear system and then eliminating constrained degrees of freedom is inefficient, and a bottleneck if there are many constraints and matrices are full, i.e. especially for 3d and/or higher order or hp-finite elements. Furthermore, it is impossible to implement for parallel computations where a process may not have access to elements of the matrix. We therefore offer a second way of building linear systems, using the AffineConstraints::add_entries_local_to_global() and AffineConstraints::distribute_local_to_global() functions discussed below. The resulting linear systems are equivalent to those one gets after calling the AffineConstraints::condense() functions.

      @@ -271,23 +271,23 @@
    21. Depending on the solver now you have to apply the AffineConstraints::distribute() function to the solution, because the solver could change the constrained values in the solution. For a Krylov based solver this should not be strictly necessary, but it is still possible that there is a difference between the inhomogeneous value and the solution value in the order of machine precision, and you may want to call AffineConstraints::distribute() anyway if you have additional constraints such as from hanging nodes.
    22. Of course, both approaches lead to the same final answer but in different ways. Using the first approach (i.e., when using use_inhomogeneities_for_rhs = false in AffineConstraints::distribute_local_to_global()), the linear system we build has zero entries in the right hand side in all those places where a degree of freedom is constrained, and some positive value on the matrix diagonal of these lines. Consequently, the solution vector of the linear system will have a zero value for inhomogeneously constrained degrees of freedom and we need to call AffineConstraints::distribute() to give these degrees of freedom their correct nonzero values.

      -

      On the other hand, in the second approach, the matrix diagonal element and corresponding right hand side entry for inhomogeneously constrained degrees of freedom are set so that the solution of the linear system already has the correct value (e.g., if the constraint is that $x_{13}=42$ then row $13$ of the matrix is empty with the exception of the diagonal entry, and $b_{13}/A_{13,13}=42$ so that the solution of $Ax=b$ must satisfy $x_{13}=42$ as desired). As a consequence, we do not need to call AffineConstraints::distribute() after solving to fix up inhomogeneously constrained components of the solution, though there is also no harm in doing so.

      +

      On the other hand, in the second approach, the matrix diagonal element and corresponding right hand side entry for inhomogeneously constrained degrees of freedom are set so that the solution of the linear system already has the correct value (e.g., if the constraint is that $x_{13}=42$ then row $13$ of the matrix is empty with the exception of the diagonal entry, and $b_{13}/A_{13,13}=42$ so that the solution of $Ax=b$ must satisfy $x_{13}=42$ as desired). As a consequence, we do not need to call AffineConstraints::distribute() after solving to fix up inhomogeneously constrained components of the solution, though there is also no harm in doing so.

      There remains the question of which of the approaches to take and why we need to set to zero the values of the solution vector in the first approach. The answer to both questions has to do with how iterative solvers solve the linear system. To this end, consider that we typically stop iterations when the residual has dropped below a certain fraction of the norm of the right hand side, or, alternatively, a certain fraction of the norm of the initial residual. Now consider this:

      -

      In addition to these considerations, consider the case where we have inhomogeneous constraints of the kind $x_{3}=\tfrac 12 x_1 + \tfrac 12$, e.g., from a hanging node constraint of the form $x_{3}=\tfrac 12 (x_1 +
-x_2)$ where $x_2$ is itself constrained by boundary values to $x_2=1$. In this case, the AffineConstraints container can of course not figure out what the final value of $x_3$ should be and, consequently, can not set the solution vector's third component correctly. Thus, the second approach will not work and you should take the first.

      +

      In addition to these considerations, consider the case where we have inhomogeneous constraints of the kind $x_{3}=\tfrac 12 x_1 + \tfrac 12$, e.g., from a hanging node constraint of the form $x_{3}=\tfrac 12 (x_1 +
+x_2)$ where $x_2$ is itself constrained by boundary values to $x_2=1$. In this case, the AffineConstraints container can of course not figure out what the final value of $x_3$ should be and, consequently, can not set the solution vector's third component correctly. Thus, the second approach will not work and you should take the first.

      Dealing with conflicting constraints

      There are situations where degrees of freedom are constrained in more than one way, and sometimes in conflicting ways. Consider, for example the following situation:

      -

      Here, degree of freedom $x_0$ marked in blue is a hanging node. If we used trilinear finite elements, i.e. FE_Q(1), then it would carry the constraint $x_0=\frac 12 (x_{1}+x_{2})$. On the other hand, it is at the boundary, and if we have imposed boundary conditions $u|_{\partial\Omega}=g$ then we will have the constraint $x_0=g_0$ where $g_0$ is the value of the boundary function $g(\mathbf x)$ at the location of this degree of freedom.

      +

      Here, degree of freedom $x_0$ marked in blue is a hanging node. If we used trilinear finite elements, i.e. FE_Q(1), then it would carry the constraint $x_0=\frac 12 (x_{1}+x_{2})$. On the other hand, it is at the boundary, and if we have imposed boundary conditions $u|_{\partial\Omega}=g$ then we will have the constraint $x_0=g_0$ where $g_0$ is the value of the boundary function $g(\mathbf x)$ at the location of this degree of freedom.

      So, which one will win? Or maybe: which one should win? There is no good answer to this question:

      That said, what should you do if you know what you want is this:

    To summarize, matrix-free computations are the way to go for higher order elements (where higher order means everything except linear shape functions) and use in explicit time stepping (step-48) or iterative solvers where also preconditioning can be done in a matrix-free way, as demonstrated in the step-37 and step-59 tutorial programs.

    The matrix-free evaluation infrastructure

    @@ -150,7 +150,7 @@

    The motivation for the FEEvaluationAccess classes is to allow for specializations of the value and gradient access of interpolated solution fields depending on the number of components. The base class FEEvaluationBase returns the gradient as a Tensor<1,n_components,Tensor<1,dim,VectorizedArray<Number>>>, with the outer tensor going over the components and the inner tensor going through the dim components of the gradient. For a scalar field, i.e., n_components=1, we can skip the outer tensor and simply use Tensor<1,dim,VectorizedArray<Number>> as the gradient type. Likewise, for a system with n_components=dim, the appropriate format for the gradient is Tensor<2,dim,VectorizedArray<Number>>.

    The FEFaceEvaluation class

    Face integrals, like for inhomogeneous Neumann conditions in continuous FEM or for the large class of discontinuous Galerkin schemes, require the evaluation of quantities on the quadrature point of a face, besides the cell integrals. The facilities for face evaluation are mostly shared with FEEvaluation, in the sense that FEFaceEvaluation also inherits from FEEvaluationAccess. All data fields regarding the degrees of freedom and shape functions can be reused, the latter because all information consists of 1D shape data anyway. With respect to the mapping data, however, a specialization is used because the data is of structdim=dim-1. As a consequence, the FEEvaluationAccess and FEEvaluationBase are given a template argument is_face to hold pointers to the cell and face mapping information, respectively. Besides access to the function values with FEEvaluationAccess::get_value() or gradients with FEEvaluationAccess::get_gradient(), the face evaluator also enables the access to the normal vector by FEEvaluationAccess::normal_vector() and a specialized field FEEvaluationAccess::get_normal_derivative(), which returns the derivative of the solution field normal to the face. This quantity is computed as the gradient (in real space) multiplied by the normal vector. The combination of the gradient and normal vector is typical of many (simple) second-order elliptic equations, such as the discretization of the Laplacian with the interior penalty method. If the gradient alone is not needed, the combined operation significantly reduces the data access, because only dim data entries for normal * Jacobian per quadrature point are necessary, as opposed to dim^2 fields for the Jacobian and dim fields for the normal when accessing them individually.

    -

    An important optimization for the computation of face integrals is to think about the amount of vector data that must be accessed to evaluate the integrals on a face. Think for example of the case of FE_DGQ, i.e., Lagrange polynomials that have some of their nodes on the element boundary. For evaluation of the function values, only $(k+1)^{d-1}$ degrees of freedom contribute via a non-zero basis function, whereas the rest of the $(k+1)^d$ basis functions evaluate to zero on that boundary. Since vector access is one of the bottlenecks in matrix-free computations, the access to the vector should be restricted to the interesting entries. To enable this setup, the method FEFaceEvaluation::gather_evaluate() (and FEFaceEvaluation::integrate_scatter() for the integration equivalent) combines the vector access with the interpolation to the quadrature points. There exist two specializations, including the aforementioned "non-zero" value case, which is stored as the field internal::MatrixFreeFunctions::ShapeInfo::nodal_at_cell_boundaries. A similar property is also possible for the case where only the value and the first derivative of a selected number of basis functions evaluate to nonzero on a face. The associated element type is FE_DGQHermite and the decision is stored on the property internal::MatrixFreeFunctions::tensor_symmetric_hermite. The decision on whether such an optimized kernel can be used is made automatically inside FEFaceEvaluation::gather_evaluate() and FEFaceEvaluation::integrate_scatter(). It might seem inefficient to do this decision for every integration task, but in the end this is a single if statement (conditional jump) that is easily predictable for a modern CPU as the decision is always the same inside an integration loop. (One only pays by somewhat increased compile times because the compiler needs to generate code for all paths, though).

    +

    An important optimization for the computation of face integrals is to think about the amount of vector data that must be accessed to evaluate the integrals on a face. Think for example of the case of FE_DGQ, i.e., Lagrange polynomials that have some of their nodes on the element boundary. For evaluation of the function values, only $(k+1)^{d-1}$ degrees of freedom contribute via a non-zero basis function, whereas the rest of the $(k+1)^d$ basis functions evaluate to zero on that boundary. Since vector access is one of the bottlenecks in matrix-free computations, the access to the vector should be restricted to the interesting entries. To enable this setup, the method FEFaceEvaluation::gather_evaluate() (and FEFaceEvaluation::integrate_scatter() for the integration equivalent) combines the vector access with the interpolation to the quadrature points. There exist two specializations, including the aforementioned "non-zero" value case, which is stored as the field internal::MatrixFreeFunctions::ShapeInfo::nodal_at_cell_boundaries. A similar property is also possible for the case where only the value and the first derivative of a selected number of basis functions evaluate to nonzero on a face. The associated element type is FE_DGQHermite and the decision is stored on the property internal::MatrixFreeFunctions::tensor_symmetric_hermite. The decision on whether such an optimized kernel can be used is made automatically inside FEFaceEvaluation::gather_evaluate() and FEFaceEvaluation::integrate_scatter(). It might seem inefficient to do this decision for every integration task, but in the end this is a single if statement (conditional jump) that is easily predictable for a modern CPU as the decision is always the same inside an integration loop. (One only pays by somewhat increased compile times because the compiler needs to generate code for all paths, though).

    The data storage through the MatrixFree class

    The tasks performed by FEEvaluation and FEFaceEvaluation can be split into the three categories: index access into vectors, evaluation and integration on the unit cell, and operation on quadrature points including the geometry evaluation. This split is reflected by the major data fields contained by MatrixFree, using internal::MatrixFreeFunctions::DoFInfo, internal::MatrixFreeFunctions::ShapeInfo, and internal::MatrixFreeFunctions::MappingInfo for each of these three categories, respectively. Their design principles and internal layout are described in the following subsections.

    The main interface all these data structures adhere to is that integration tasks are broken down into a range of cells or faces that one can index into by a single integer index. The information about an integer range for the cell integrals, inner face integrals, and boundary integrals is provided by the class internal::MatrixFreeFunctions::TaskInfo, using the data fields cell_partition_data, face_partition_data, and boundary_partition_data. This class also contains information about subranges of indices for scheduling tasks in parallel using threads, and a grouping of the index range within {cell,face,boundary}_partition_data for interleaving cell and face integrals such that the access to vector entries for cell and face integrals re-uses data already in caches.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__reordering.html differs (JavaScript source, ASCII text, with very long lines (1334)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__reordering.html 2023-11-25 15:26:00.166551135 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__reordering.html 2023-11-25 15:26:00.166551135 +0100 @@ -168,7 +168,7 @@

    From the examples above, it is obvious that if we encounter a cell that cannot be added to the cells which have already been entered, we can not usually point to a cell that is the culprit and that must be entered in a different orientation. Furthermore, even if we knew which cell, there might be a large number of cells that would then cease to fit into the grid and for which we would have to find a different orientation as well (in the second example above, if we rotated cell 1, then we would have to rotate the cells 1 through N-1 as well).

    A brute force approach to this problem is the following: if cell N can't be added, then try to rotate cell N-1. If we can't rotate cell N-1 any more, then try to rotate cell N-2 and try to add cell N with all orientations of cell N-1. And so on. Algorithmically, we can visualize this by a tree structure, where node N has as many children as there are possible orientations of node N+1 (in two space dimensions, there are four orientations in which each cell can be constructed from its four vertices; for example, if the vertex indices are (0 1 3 2), then the four possibilities would be (0 1 3 2), (1 3 2 0), (3 2 0 1), and (2 0 1 3)). When adding one cell after the other, we traverse this tree in a depth-first (pre-order) fashion. When we encounter that one path from the root (cell 0) to a leaf (the last cell) is not allowed (i.e. that the orientations of the cells which are encoded in the path through the tree does not lead to a valid triangulation), we have to track back and try another path through the tree.

    In practice, of course, we do not follow each path to a final node and then find out whether a path leads to a valid triangulation, but rather use an inductive argument: if for all previously added cells the triangulation is a valid one, then we can find out whether a path through the tree can yield a valid triangulation by checking whether entering the present cell would introduce any faces that have a nonunique direction; if that is so, then we can stop following all paths below this point and track back immediately.

    -

    Nevertheless, it is already obvious that the tree has $4^N$ leaves in two space dimensions, since each of the $N$ cells can be added in four orientations. Most of these nodes can be discarded rapidly, since firstly the orientation of the first cell is irrelevant, and secondly if we add one cell that has a neighbor that has already been added, then there are already only two possible orientations left, so the total number of checks we have to make until we find a valid way is significantly smaller than $4^N$. However, the algorithm is still exponential in time and linear in memory (we only have to store the information for the present path in form of a stack of orientations of cells that have already been added).

    +

    Nevertheless, it is already obvious that the tree has $4^N$ leaves in two space dimensions, since each of the $N$ cells can be added in four orientations. Most of these nodes can be discarded rapidly, since firstly the orientation of the first cell is irrelevant, and secondly if we add one cell that has a neighbor that has already been added, then there are already only two possible orientations left, so the total number of checks we have to make until we find a valid way is significantly smaller than $4^N$. However, the algorithm is still exponential in time and linear in memory (we only have to store the information for the present path in form of a stack of orientations of cells that have already been added).

    In fact, the two examples above show that the exponential estimate is not a pessimistic one: we indeed have to track back to one of the very first cells there to find a way to add all cells in a consistent fashion.

    This discouraging situation is greatly improved by the fact that we have an alternative algorithm for 2d that is always linear in runtime (discovered and implemented by Michael Anderson of TICAM, University of Texas, in 2003), and that for 3d we can find an algorithm that in practice is usually only roughly linear in time and memory. We will describe these algorithms in the following. A full description and theoretical analysis is given in [AABB17] .

    The 2d linear complexity algorithm

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__threads.html differs (JavaScript source, ASCII text, with very long lines (1232)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__threads.html 2023-11-25 15:26:00.186550728 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__threads.html 2023-11-25 15:26:00.186550728 +0100 @@ -302,7 +302,7 @@
    *out = function(*in_1, *in_2);
    }

    In many cases, function has no state, and so we can split this loop into several sub-ranges as explained above. Consequently, deal.II has a set of functions parallel::transform that look like the one above but that do their work in parallel (there are several versions with one, two, and more input iterators for function objects that take one, two, or more arguments). The only difference in calling these functions is that they take an additional last argument that denotes the minimum size of sub-ranges of [begin_in_1,end_in_1); it should be big enough so that we don't spend more time on scheduling sub-ranges to processors but small enough that processors can be efficiently load balanced. A rule of thumb appears to be that a sub-range is too small if it takes less than 2000 instructions to execute it.

    -

    An example of how to use these functions are vector operations like the addition in $z = x+y$ where all three objects are of type Vector<Number>:

    parallel::transform (x.begin(), x.end(),
    +

    An example of how to use these functions are vector operations like the addition in $z = x+y$ where all three objects are of type Vector<Number>:

    parallel::transform (x.begin(), x.end(),
    y.begin(),
    z.begin(),
    [](const Number first, const Number second)
    @@ -313,7 +313,7 @@
    Point< 2 > second
    Definition: grid_out.cc:4616
    Point< 2 > first
    Definition: grid_out.cc:4615
    void transform(const InputIterator &begin_in, const InputIterator &end_in, OutputIterator out, const Function &function, const unsigned int grainsize)
    Definition: parallel.h:148
    -

    In this example, we used a lambda expression to construct, on the fly, a function object that takes two arguments and returns the sum of the two. This is exactly what we needed when we want to add the individual elements of vectors $x$ and $y$ and write the sum of the two into the elements of $z$. The function object that we get here is completely known to the compiler and when it expands the loop that results from parallel::transform will be as if we had written the loop in its obvious form:

    InputIterator1 in_1 = x.begin();
    +

    In this example, we used a lambda expression to construct, on the fly, a function object that takes two arguments and returns the sum of the two. This is exactly what we needed when we want to add the individual elements of vectors $x$ and $y$ and write the sum of the two into the elements of $z$. The function object that we get here is completely known to the compiler and when it expands the loop that results from parallel::transform will be as if we had written the loop in its obvious form:

    InputIterator1 in_1 = x.begin();
    InputIterator2 in_2 = y.begin();
    OutputIterator out = z.begin();
    @@ -406,7 +406,7 @@
    }
    void apply_to_subranges(const Iterator &begin, const std_cxx20::type_identity_t< Iterator > &end, const Function &f, const unsigned int grainsize)
    Definition: parallel.h:435

    Here, we call the vmult_on_subrange function on sub-ranges of at least 200 elements each, so that the initial setup cost can amortize.

    -

    A related operation is when the loops over elements each produce a result that must then be accumulated (other reduction operations than addition of numbers would work as well). An example is to form the matrix norm $x^T M x$ (it really is only a norm if $M$ is positive definite, but let's assume for a moment that it is). A sequential implementation would look like this for sparse matrices:

    double SparseMatrix::mat_norm (const Vector &x) const
    +

    A related operation is when the loops over elements each produce a result that must then be accumulated (other reduction operations than addition of numbers would work as well). An example is to form the matrix norm $x^T M x$ (it really is only a norm if $M$ is positive definite, but let's assume for a moment that it is). A sequential implementation would look like this for sparse matrices:

    double SparseMatrix::mat_norm (const Vector &x) const
    {
    const double *val_ptr = &values[0];
    const unsigned int *colnum_ptr = &colnums[0];
    @@ -607,7 +607,7 @@

  • -

    The last issue that is worth addressing is that the way we wrote the MyClass::assemble_on_one_cell function above, we create and destroy an FEValues object every time the function is called, i.e. once for each cell in the triangulation. That's an immensely expensive operation because the FEValues class tries to do a lot of work in its constructor in an attempt to reduce the number of operations we have to do on each cell (i.e. it increases the constant in the ${\cal O}(1)$ effort to initialize such an object in order to reduce the constant in the ${\cal O}(N)$ operations to call FEValues::reinit on the $N$ cells of a triangulation). Creating and destroying an FEValues object on each cell invalidates this effort.

    +

    The last issue that is worth addressing is that the way we wrote the MyClass::assemble_on_one_cell function above, we create and destroy an FEValues object every time the function is called, i.e. once for each cell in the triangulation. That's an immensely expensive operation because the FEValues class tries to do a lot of work in its constructor in an attempt to reduce the number of operations we have to do on each cell (i.e. it increases the constant in the ${\cal O}(1)$ effort to initialize such an object in order to reduce the constant in the ${\cal O}(N)$ operations to call FEValues::reinit on the $N$ cells of a triangulation). Creating and destroying an FEValues object on each cell invalidates this effort.

    The way to avoid this is to put the FEValues object into a second structure that will hold scratch data, and initialize it in the constructor:

    struct PerTaskData {
    FullMatrix<double> cell_matrix;
    Vector<double> cell_rhs;
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__vector__valued.html differs (JavaScript source, ASCII text, with very long lines (1296)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__vector__valued.html 2023-11-25 15:26:00.206550321 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/group__vector__valued.html 2023-11-25 15:26:00.206550321 +0100 @@ -280,8 +280,8 @@ \right) \end{eqnarray*}" src="form_302.png"/>

    -

    indeed has four components. We note that we could change the ordering of the solution components $\textbf u$ and $p$ inside $U$ if we also change columns of the matrix operator.

    -

    Next, we need to think about test functions $V$. We want to multiply both sides of the equation with them, then integrate over $\Omega$. The result should be a scalar equality. We can achieve this by choosing $V$ also vector valued as

    +

    indeed has four components. We note that we could change the ordering of the solution components $\textbf u$ and $p$ inside $U$ if we also change columns of the matrix operator.

    +

    Next, we need to think about test functions $V$. We want to multiply both sides of the equation with them, then integrate over $\Omega$. The result should be a scalar equality. We can achieve this by choosing $V$ also vector valued as

    \begin{eqnarray*}
   V =
   \left(
@@ -449,7 +449,7 @@
 <p class=

  • -

    These views can then be asked for information about these individual components. For example, when you write fe_values[pressure].value(i,q) you get the value of the pressure component of the $i$th shape function $V_i$ at the $q$th quadrature point. Because the extractor pressure represents a scalar component, the results of the operator fe_values[pressure].value(i,q) is a scalar number. On the other hand, the call fe_values[velocities].value(i,q) would produce the value of a whole set of dim components, which would be of type Tensor<1,dim>.

    +

    These views can then be asked for information about these individual components. For example, when you write fe_values[pressure].value(i,q) you get the value of the pressure component of the $i$th shape function $V_i$ at the $q$th quadrature point. Because the extractor pressure represents a scalar component, the results of the operator fe_values[pressure].value(i,q) is a scalar number. On the other hand, the call fe_values[velocities].value(i,q) would produce the value of a whole set of dim components, which would be of type Tensor<1,dim>.

  • @@ -593,10 +593,10 @@
    }
  • So if, again, this is not the code we use in step-8, what do we do there? The answer rests on the finite element we use. In step-8, we use the following element:

    FESystem<dim> finite_element (FE_Q<dim>(1), dim);
    -

    In other words, the finite element we use consists of dim copies of the same scalar element. This is what we call a primitive element: an element that may be vector-valued but where each shape function has exactly one non-zero component. In other words: if the $x$-component of a displacement shape function is nonzero, then the $y$- and $z$-components must be zero and similarly for the other components. What this means is that also derived quantities based on shape functions inherit this sparsity property. For example: the divergence $\mathrm{div}\ \Phi(x,y,z)=\partial_x\varphi_x(x,y,z) +
+</div><!-- fragment --><p> In other words, the finite element we use consists of <code>dim</code> copies of the same scalar element. This is what we call a <a class=primitive element: an element that may be vector-valued but where each shape function has exactly one non-zero component. In other words: if the $x$-component of a displacement shape function is nonzero, then the $y$- and $z$-components must be zero and similarly for the other components. What this means is that also derived quantities based on shape functions inherit this sparsity property. For example: the divergence $\mathrm{div}\ \Phi(x,y,z)=\partial_x\varphi_x(x,y,z) +
    \partial_y\varphi_y(x,y,z) + \partial_z\varphi_z(x,y,z)$ of a vector-valued shape function $\Phi(x,y,z)=(\varphi_x(x,y,z), \varphi_y(x,y,z), \varphi_z(x,y,z))^T$ is, in the present case, either $\mathrm{div}\ \Phi(x,y,z)=\partial_x\varphi_x(x,y,z)$, $\mathrm{div}\ \Phi(x,y,z)=\partial_y\varphi_y(x,y,z)$, or $\mathrm{div}\ \Phi(x,y,z)=\partial_z\varphi_z(x,y,z)$, because exactly one of the $\varphi_\ast$ is nonzero. Knowing this means that we can save a number of computations that, if we were to do them, would only yield zeros to add up.

    In a similar vein, if only one component of a shape function is nonzero, then only one row of its gradient $\nabla\Phi$ is nonzero. What this means for terms like $(\mu \nabla\Phi_i,\nabla\Phi_j)$, where the scalar product between two tensors is defined as $(\tau, \gamma)_\Omega=\int_\Omega \sum_{i,j=1}^d \tau_{ij} \gamma_{ij}$, is that the term is only nonzero if both tensors have their nonzero entries in the same row, which means that the two shape functions have to have their single nonzero component in the same location.

    -

    If we use this sort of knowledge, then we can in a first step avoid computing gradient tensors if we can determine up front that their scalar product will be nonzero, in a second step avoid building the entire tensors and only get its nonzero components, and in a final step simplify the scalar product by only considering that index $i$ for the one nonzero row, rather than multiplying and adding up zeros.

    +

    If we use this sort of knowledge, then we can in a first step avoid computing gradient tensors if we can determine up front that their scalar product will be nonzero, in a second step avoid building the entire tensors and only get its nonzero components, and in a final step simplify the scalar product by only considering that index $i$ for the one nonzero row, rather than multiplying and adding up zeros.

    The vehicle for all this is the ability to determine which vector component is going to be nonzero. This information is provided by the FiniteElement::system_to_component_index function. What can be done with it, using the example above, is explained in detail in step-8.

    Block solvers

    Using techniques as shown above, it isn't particularly complicated to assemble the linear system, i.e. matrix and right hand side, for a vector-valued problem. However, then it also has to be solved. This is more complicated. Naively, one could just consider the matrix as a whole. For most problems, this matrix is not going to be definite (except for special cases like the elasticity equations covered in step-8 and step-17). It will, often, also not be symmetric. This rather general class of matrices presents problems for iterative solvers: the lack of structural properties prevents the use of most efficient methods and preconditioners. While it can be done, the solution process will therefore most often be slower than necessary.

    @@ -614,7 +614,7 @@ \right), \end{eqnarray*}" src="form_337.png"/>

    -

    where $M$ represents the mass matrix that results from discretizing the identity operator $\mathbf 1$ and $B$ the equivalent of the gradient operator.

    +

    where $M$ represents the mass matrix that results from discretizing the identity operator $\mathbf 1$ and $B$ the equivalent of the gradient operator.

    By default, this is not what happens, however. Rather, deal.II assigns numbers to degrees of freedom in a rather random manner. Consequently, if you form a vector out of the values of degrees of freedom will not be neatly ordered in a vector like

    \begin{eqnarray*}
   \left(
@@ -654,8 +654,8 @@
   MU = F-BP.
 \end{eqnarray*}

    -

    This has the advantage that the matrices $B^TM^{-1}B$ and $M$ that we have to solve with are both symmetric and positive definite, as opposed to the large whole matrix we had before.

    -

    How a solver like this is implemented is explained in more detail in step-20, step-31, and a few other tutorial programs. What we would like to point out here is that we now need a way to extract certain parts of a matrix or vector: if we are to multiply, say, the $U$ part of the solution vector by the $M$ part of the global matrix, then we need to have a way to access these parts of the whole.

    +

    This has the advantage that the matrices $B^TM^{-1}B$ and $M$ that we have to solve with are both symmetric and positive definite, as opposed to the large whole matrix we had before.

    +

    How a solver like this is implemented is explained in more detail in step-20, step-31, and a few other tutorial programs. What we would like to point out here is that we now need a way to extract certain parts of a matrix or vector: if we are to multiply, say, the $U$ part of the solution vector by the $M$ part of the global matrix, then we need to have a way to access these parts of the whole.

    This is where the BlockVector, BlockSparseMatrix, and similar classes come in. For all practical purposes, then can be used as regular vectors or sparse matrices, i.e. they offer element access, provide the usual vector operations and implement, for example, matrix-vector multiplications. In other words, assembling matrices and right hand sides works in exactly the same way as for the non-block versions. That said, internally they store the elements of vectors and matrices in "blocks"; for example, instead of using one large array, the BlockVector class stores it as a set of arrays each of which we call a block. The advantage is that, while the whole thing can be used as a vector, one can also access an individual block which then, again, is a vector with all the vector operations.

    To show how to do this, let us consider the second equation $MU=F-BP$ to be solved above. This can be achieved using the following sequence similar to what we have in step-20:

    Vector<double> tmp (solution.block(0).size());
    system_matrix.block(0,1).vmult (tmp, solution.block(1));
    @@ -675,7 +675,7 @@
    Definition: vector.h:109
    -

    What's happening here is that we allocate a temporary vector with as many elements as the first block of the solution vector, i.e. the velocity component $U$, has. We then set this temporary vector equal to the $(0,1)$ block of the matrix, i.e. $B$, times component 1 of the solution which is the previously computed pressure $P$. The result is multiplied by $-1$, and component 0 of the right hand side, $F$ is added to it. The temporary vector now contains $F-BP$. The rest of the code snippet simply solves a linear system with $F-BP$ as right hand side and the $(0,0)$ block of the global matrix, i.e. $M$. Using block vectors and matrices in this way therefore allows us to quite easily write rather complicated solvers making use of the block structure of a linear system.

    +

    What's happening here is that we allocate a temporary vector with as many elements as the first block of the solution vector, i.e. the velocity component $U$, has. We then set this temporary vector equal to the $(0,1)$ block of the matrix, i.e. $B$, times component 1 of the solution which is the previously computed pressure $P$. The result is multiplied by $-1$, and component 0 of the right hand side, $F$ is added to it. The temporary vector now contains $F-BP$. The rest of the code snippet simply solves a linear system with $F-BP$ as right hand side and the $(0,0)$ block of the global matrix, i.e. $M$. Using block vectors and matrices in this way therefore allows us to quite easily write rather complicated solvers making use of the block structure of a linear system.

    Extracting data from solutions

    Once one has computed a solution, it is often necessary to evaluate it at quadrature points, for example to evaluate nonlinear residuals for the next Newton iteration, to evaluate the finite element residual for error estimators, or to compute the right hand side for the next time step in a time dependent problem.

    The way this is done us to again use an FEValues object to evaluate the shape functions at quadrature points, and with those also the values of a finite element function. For the example of the mixed Laplace problem above, consider the following code after solving:

    std::vector<Vector<double> > local_solution_values (n_q_points,
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/index.html differs (JavaScript source, ASCII text, with very long lines (886)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/index.html 2023-11-25 15:26:00.216550117 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/index.html 2023-11-25 15:26:00.216550117 +0100 @@ -117,7 +117,7 @@
  • DoFHandler: DoFHandler objects are the confluence of triangulations and finite elements: the finite element class describes how many degrees of freedom it needs per vertex, line, or cell, and the DoFHandler class allocates this space so that each vertex, line, or cell of the triangulation has the correct number of them. It also gives them a global numbering.

    -

    A different viewpoint is this: While the mesh and finite element describe abstract properties of the finite dimensional space $V_h$ in which we seek the discrete solution, the DoFHandler classes enumerate a concrete basis of this space so that we can represent the discrete solution as $u_h(\mathbf x)= \sum_j U_j \varphi_i(\mathbf x)$ by an ordered set of coefficients $U_j$.

    +

    A different viewpoint is this: While the mesh and finite element describe abstract properties of the finite dimensional space $V_h$ in which we seek the discrete solution, the DoFHandler classes enumerate a concrete basis of this space so that we can represent the discrete solution as $u_h(\mathbf x)= \sum_j U_j \varphi_i(\mathbf x)$ by an ordered set of coefficients $U_j$.

    Just as with triangulation objects, most operations on DoFHandlers are done by looping over all cells and doing something on each or a subset of them. The interfaces of the two classes are therefore rather similar: they allow to get iterators to the first and last cell (or face, or line, etc) and offer information through these iterators. The information that can be gotten from these iterators is the geometric and topological information that can already be gotten from the triangulation iterators (they are in fact derived classes) as well as things like the global numbers of the degrees of freedom on the present cell. On can also ask an iterator to extract the values corresponding to the degrees of freedom on the present cell from a data vector that stores values for all degrees of freedom associated with a triangulation.

    It is worth noting that, just as triangulations, DoFHandler classes do not know anything about the mapping from the unit cell to its individual cells. It is also ignorant of the shape functions that correspond to the degrees of freedom it manages: all it knows is that there are, for example, 2 degrees of freedom for each vertex and 4 per cell interior. Nothing about their specifics is relevant to the DoFHandler class with the exception of the fact that they exist.

    The DoFHandler class and its associates are described in the Degrees of Freedom module. In addition, there are specialized versions that can handle multilevel and hp-discretizations. These are described in the Multilevel support and hp-finite element support modules. Finite element methods frequently imply constraints on degrees of freedom, such as for hanging nodes or nodes at which boundary conditions apply; dealing with such constraints is described in the Constraints on degrees of freedom module.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/index__set_8h.html differs (JavaScript source, ASCII text, with very long lines (420)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/index__set_8h.html 2023-11-25 15:26:00.229883182 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/index__set_8h.html 2023-11-25 15:26:00.229883182 +0100 @@ -145,7 +145,7 @@
  • -

    Create and return an index set of size $N$ that contains every single index within this range. In essence, this function returns an index set created by

    IndexSet is (N);
    +

    Create and return an index set of size $N$ that contains every single index within this range. In essence, this function returns an index set created by

    IndexSet is (N);
    is.add_range(0, N);

    This function exists so that one can create and initialize index sets that are complete in one step, or so one can write code like

    if (my_index_set == complete_index_set(my_index_set.size())
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html differs (JavaScript source, ASCII text, with very long lines (471)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 2023-11-25 15:26:00.239882978 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 2023-11-25 15:26:00.239882978 +0100 @@ -139,11 +139,11 @@

    Check if data on all children match, and return value of the first child.

    -\[
+<picture><source srcset=\[
   d_{K_p} = d_{K_c}
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2146.png"/>

    @@ -176,13 +176,13 @@

    Return sum of data on all children.

    -\[
+<picture><source srcset=\[
   d_{K_p} = \sum d_{K_c}
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2147.png"/>

    -

    This strategy preserves the $l_1$-norm of the corresponding global data vector before and after adaptation.

    +

    This strategy preserves the $l_1$-norm of the corresponding global data vector before and after adaptation.

    @@ -212,15 +212,15 @@
    -

    Return $ l_2 $-norm of data on all children.

    +

    Return $ l_2 $-norm of data on all children.

    -\[
+<picture><source srcset=\[
   d_{K_p}^2 = \sum d_{K_c}^2
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2149.png"/>

    -

    This strategy preserves the $l_2$-norm of the corresponding global data vector before and after adaptation.

    +

    This strategy preserves the $l_2$-norm of the corresponding global data vector before and after adaptation.

    @@ -252,11 +252,11 @@

    Return mean value of data on all children.

    -\[
+<picture><source srcset=\[
   d_{K_p} = \sum d_{K_c} / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2150.png"/>

    @@ -289,11 +289,11 @@

    Return maximum value of data on all children.

    -\[
+<picture><source srcset=\[
   d_{K_p} = \max \left( d_{K_c} \right)
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2151.png"/>

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html differs (JavaScript source, ASCII text, with very long lines (457)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 2023-11-25 15:26:00.253216038 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 2023-11-25 15:26:00.249882775 +0100 @@ -135,11 +135,11 @@

    Return a vector containing copies of data of the parent cell for each child.

    -\[
+<picture><source srcset=\[
   d_{K_c} = d_{K_p}
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2143.png"/>

    @@ -172,13 +172,13 @@

    Return a vector which contains data of the parent cell being equally divided among all children.

    -\[
+<picture><source srcset=\[
   d_{K_c} = d_{K_p} / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2144.png"/>

    -

    This strategy preserves the $l_1$-norm of the corresponding global data Vector before and after adaptation.

    +

    This strategy preserves the $l_1$-norm of the corresponding global data Vector before and after adaptation.

    @@ -210,13 +210,13 @@

    Return a vector which contains squared data of the parent cell being equally divided among the squares of all children.

    -\[
+<picture><source srcset=\[
   d_{K_c}^2 = d_{K_p}^2 / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2145.png"/>

    -

    This strategy preserves the $l_2$-norm of the corresponding global data Vector before and after adaptation.

    +

    This strategy preserves the $l_2$-norm of the corresponding global data Vector before and after adaptation.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDataComponentInterpretation.html differs (JavaScript source, ASCII text, with very long lines (991)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDataComponentInterpretation.html 2023-11-25 15:26:00.263215835 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDataComponentInterpretation.html 2023-11-25 15:26:00.263215835 +0100 @@ -113,7 +113,7 @@
    -

    The members of this enum are used to describe the logical interpretation of what the various components of a vector-valued data set mean. For example, if one has a finite element for the Stokes equations in 2d, representing components $(u,v,p)$, one would like to indicate that the first two, $u$ and $v$, represent a logical vector so that later on when we generate graphical output we can hand them off to a visualization program that will automatically know to render them as a vector field, rather than as two separate and independent scalar fields.

    +

    The members of this enum are used to describe the logical interpretation of what the various components of a vector-valued data set mean. For example, if one has a finite element for the Stokes equations in 2d, representing components $(u,v,p)$, one would like to indicate that the first two, $u$ and $v$, represent a logical vector so that later on when we generate graphical output we can hand them off to a visualization program that will automatically know to render them as a vector field, rather than as two separate and independent scalar fields.

    By passing a set of enums of the current kind to the DataOut_DoFData::add_data_vector functions, this can be achieved.

    See the step-22 tutorial program for an example on how this information can be used in practice.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDataOutBase.html differs (JavaScript source, ASCII text, with very long lines (1173)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDataOutBase.html 2023-11-25 15:26:00.286548695 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDataOutBase.html 2023-11-25 15:26:00.286548695 +0100 @@ -593,7 +593,7 @@

    While this discussion applies to two spatial dimensions, it is more complicated in 3d. The reason is that we could still use patches, but it is difficult when trying to visualize them, since if we use a cut through the data (by, for example, using x- and z-coordinates, a fixed y-value and plot function values in z-direction, then the patched data is not a patch in the sense GNUPLOT wants it any more. Therefore, we use another approach, namely writing the data on the 3d grid as a sequence of lines, i.e. two points each associated with one or more data sets. There are therefore 12 lines for each subcells of a patch.

    Given the lines as described above, a cut through this data in Gnuplot can then be achieved like this:

    *   set data style lines
     *   splot [:][:][0:] "T" using 1:2:(\$3==.5 ? \$4 : -1)
    -* 

    This command plots data in $x$- and $y$-direction unbounded, but in $z$-direction only those data points which are above the $x$- $y$-plane (we assume here a positive solution, if it has negative values, you might want to decrease the lower bound). Furthermore, it only takes the data points with z-values (&3) equal to 0.5, i.e. a cut through the domain at z=0.5. For the data points on this plane, the data values of the first data set (&4) are raised in z-direction above the x-y-plane; all other points are denoted the value -1 instead of the value of the data vector and are not plotted due to the lower bound in z plotting direction, given in the third pair of brackets.

    +*

    This command plots data in $x$- and $y$-direction unbounded, but in $z$-direction only those data points which are above the $x$- $y$-plane (we assume here a positive solution, if it has negative values, you might want to decrease the lower bound). Furthermore, it only takes the data points with z-values (&3) equal to 0.5, i.e. a cut through the domain at z=0.5. For the data points on this plane, the data values of the first data set (&4) are raised in z-direction above the x-y-plane; all other points are denoted the value -1 instead of the value of the data vector and are not plotted due to the lower bound in z plotting direction, given in the third pair of brackets.

    More complex cuts are possible, including nonlinear ones. Note however, that only those points which are actually on the cut-surface are plotted.

    Definition at line 3557 of file data_out_base.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDerivativeApproximation.html differs (JavaScript source, ASCII text, with very long lines (1600)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDerivativeApproximation.html 2023-11-25 15:26:00.299881757 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDerivativeApproximation.html 2023-11-25 15:26:00.299881757 +0100 @@ -118,17 +118,17 @@

    Detailed Description

    This namespace provides functions that compute a cell-wise approximation of the norm of a derivative of a finite element field by taking difference quotients between neighboring cells. This is a rather simple but efficient form to get an error indicator, since it can be computed with relatively little numerical effort and yet gives a reasonable approximation.

    -

    The way the difference quotients are computed on cell $K$ is the following (here described for the approximation of the gradient of a finite element field, but see below for higher derivatives): let $K'$ be a neighboring cell, and let $y_{K'}=x_{K'}-x_K$ be the distance vector between the centers of the two cells, then $ \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\|
-}$ is an approximation of the directional derivative $ \nabla u(x_K) \cdot
-\frac{y_{K'}}{ \|y_{K'}\| }.$ By multiplying both terms by $\frac{y_{K'}}{
-\|y_{K'}\| }$ from the left and summing over all neighbors $K'$, we obtain $ \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|} \frac{y_{K'}^T}{ \|y_{K'}\| }
+<p>The way the difference quotients are computed on cell <picture><source srcset=$K$ is the following (here described for the approximation of the gradient of a finite element field, but see below for higher derivatives): let $K'$ be a neighboring cell, and let $y_{K'}=x_{K'}-x_K$ be the distance vector between the centers of the two cells, then $ \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\|
+}$ is an approximation of the directional derivative $ \nabla u(x_K) \cdot
+\frac{y_{K'}}{ \|y_{K'}\| }.$ By multiplying both terms by $\frac{y_{K'}}{
+\|y_{K'}\| }$ from the left and summing over all neighbors $K'$, we obtain $ \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|} \frac{y_{K'}^T}{ \|y_{K'}\| }
 \right) \nabla u(x_K) \approx \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|}
-\frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| }  \right).$

    -

    Thus, if the matrix $ Y =  \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|}
-\frac{y_{K'}^T}{ \|y_{K'}\| } \right)$ is regular (which is the case when the vectors $y_{K'}$ to all neighbors span the whole space), we can obtain an approximation to the true gradient by $ \nabla u(x_K) \approx Y^{-1}
+\frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| }  \right).$

    +

    Thus, if the matrix $ Y =  \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|}
+\frac{y_{K'}^T}{ \|y_{K'}\| } \right)$ is regular (which is the case when the vectors $y_{K'}$ to all neighbors span the whole space), we can obtain an approximation to the true gradient by $ \nabla u(x_K) \approx Y^{-1}
 \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \frac{u_h(x_{K'}) - u_h(x_K)}{
-\|y_{K'}\| } \right).$ This is a quantity that is easily computed. The value returned for each cell when calling the approximate_gradient function of this class is the $l_2$ norm of this approximation to the gradient. To make this a useful quantity, you may want to scale each element by the correct power of the respective cell size.

    -

    The computation of this quantity must fail if a cell has only neighbors for which the direction vectors $y_K$ do not span the whole space, since then the matrix $Y$ is no longer invertible. If this happens, you will get an error similar to this one:

    --------------------------------------------------------
    +\|y_{K'}\| } \right).$" src="form_2171.png"/> This is a quantity that is easily computed. The value returned for each cell when calling the approximate_gradient function of this class is the $l_2$ norm of this approximation to the gradient. To make this a useful quantity, you may want to scale each element by the correct power of the respective cell size.

    +

    The computation of this quantity must fail if a cell has only neighbors for which the direction vectors $y_K$ do not span the whole space, since then the matrix $Y$ is no longer invertible. If this happens, you will get an error similar to this one:

    --------------------------------------------------------
    An error occurred in line <749>
    of file <source/numerics/derivative_approximation.cc> in function
    void DerivativeApproximation::approximate(...)
    @@ -146,19 +146,19 @@
    DEAL_II_HOST constexpr Number determinant(const SymmetricTensor< 2, dim, Number > &)

    As can easily be verified, this can only happen on very coarse grids, when some cells and all their neighbors have not been refined even once. You should therefore only call the functions of this class if all cells are at least once refined. In practice this is not much of a restriction.

    Approximation of higher derivatives

    -

    Similar to the reasoning above, approximations to higher derivatives can be computed in a similar fashion. For example, the tensor of second derivatives is approximated by the formula $ \nabla^2 u(x_K) \approx Y^{-1}
+<p>Similar to the reasoning above, approximations to higher derivatives can be computed in a similar fashion. For example, the tensor of second derivatives is approximated by the formula <picture><source srcset=$ \nabla^2 u(x_K) \approx Y^{-1}
 \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \otimes \frac{\nabla u_h(x_{K'})
-- \nabla u_h(x_K)}{ \|y_{K'}\| } \right), $ where $\otimes$ denotes the outer product of two vectors. Note that unlike the true tensor of second derivatives, its approximation is not necessarily symmetric. This is due to the fact that in the derivation, it is not clear whether we shall consider as projected second derivative the term $\nabla^2 u y_{KK'}$ or $y_{KK'}^T
-\nabla^2 u$. Depending on which choice we take, we obtain one approximation of the tensor of second derivatives or its transpose. To avoid this ambiguity, as result we take the symmetrized form, which is the mean value of the approximation and its transpose.

    -

    The returned value on each cell is the spectral norm of the approximated tensor of second derivatives, i.e. the largest eigenvalue by absolute value. This equals the largest curvature of the finite element field at each cell, and the spectral norm is the matrix norm associated to the $l_2$ vector norm.

    +- \nabla u_h(x_K)}{ \|y_{K'}\| } \right), $" src="form_2173.png"/> where $\otimes$ denotes the outer product of two vectors. Note that unlike the true tensor of second derivatives, its approximation is not necessarily symmetric. This is due to the fact that in the derivation, it is not clear whether we shall consider as projected second derivative the term $\nabla^2 u y_{KK'}$ or $y_{KK'}^T
+\nabla^2 u$. Depending on which choice we take, we obtain one approximation of the tensor of second derivatives or its transpose. To avoid this ambiguity, as result we take the symmetrized form, which is the mean value of the approximation and its transpose.

    +

    The returned value on each cell is the spectral norm of the approximated tensor of second derivatives, i.e. the largest eigenvalue by absolute value. This equals the largest curvature of the finite element field at each cell, and the spectral norm is the matrix norm associated to the $l_2$ vector norm.

    Even higher than the second derivative can be obtained along the same lines as exposed above.

    Refinement indicators based on the derivatives

    -

    If you would like to base a refinement criterion upon these approximation of the derivatives, you will have to scale the results of this class by an appropriate power of the mesh width. For example, since $\|u-u_h\|^2_{L_2}
-\le C h^2 \|\nabla u\|^2_{L_2}$, it might be the right thing to scale the indicators as $\eta_K = h \|\nabla u\|_K$, i.e. $\eta_K = h^{1+d/2}
-\|\nabla u\|_{\infty;K}$, i.e. the right power is $1+d/2$.

    +

    If you would like to base a refinement criterion upon these approximation of the derivatives, you will have to scale the results of this class by an appropriate power of the mesh width. For example, since $\|u-u_h\|^2_{L_2}
+\le C h^2 \|\nabla u\|^2_{L_2}$, it might be the right thing to scale the indicators as $\eta_K = h \|\nabla u\|_K$, i.e. $\eta_K = h^{1+d/2}
+\|\nabla u\|_{\infty;K}$, i.e. the right power is $1+d/2$.

    Likewise, for the second derivative, one should choose a power of the mesh size $h$ one higher than for the gradient.

    Implementation

    -

    The formulae for the computation of approximations to the gradient and to the tensor of second derivatives shown above are very much alike. The basic difference is that in one case the finite difference quotient is a scalar, while in the other case it is a vector. For higher derivatives, this would be a tensor of even higher rank. We then have to form the outer product of this difference quotient with the distance vector $y_{KK'}$, symmetrize it, contract it with the matrix $Y^{-1}$ and compute its norm. To make the implementation simpler and to allow for code reuse, all these operations that are dependent on the actual order of the derivatives to be approximated, as well as the computation of the quantities entering the difference quotient, have been separated into auxiliary nested classes (names Gradient and SecondDerivative) and the main algorithm is simply passed one or the other data types and asks them to perform the order dependent operations. The main framework that is independent of this, such as finding all active neighbors, or setting up the matrix $Y$ is done in the main function approximate.

    +

    The formulae for the computation of approximations to the gradient and to the tensor of second derivatives shown above are very much alike. The basic difference is that in one case the finite difference quotient is a scalar, while in the other case it is a vector. For higher derivatives, this would be a tensor of even higher rank. We then have to form the outer product of this difference quotient with the distance vector $y_{KK'}$, symmetrize it, contract it with the matrix $Y^{-1}$ and compute its norm. To make the implementation simpler and to allow for code reuse, all these operations that are dependent on the actual order of the derivatives to be approximated, as well as the computation of the quantities entering the difference quotient, have been separated into auxiliary nested classes (names Gradient and SecondDerivative) and the main algorithm is simply passed one or the other data types and asks them to perform the order dependent operations. The main framework that is independent of this, such as finding all active neighbors, or setting up the matrix $Y$ is done in the main function approximate.

    Due to this way of operation, the class may be easily extended for higher order derivatives than are presently implemented. Basically, only an additional class along the lines of the derivative descriptor classes Gradient and SecondDerivative has to be implemented, with the respective alias and functions replaced by the appropriate analogues for the derivative that is to be approximated.

    Function Documentation

    @@ -293,7 +293,7 @@
    -

    This function is the analogue to the one above, computing finite difference approximations of the tensor of second derivatives. Pass it the DoF handler object that describes the finite element field, a nodal value vector, and receive the cell-wise spectral norm of the approximated tensor of second derivatives. The spectral norm is the matrix norm associated to the $l_2$ vector norm.

    +

    This function is the analogue to the one above, computing finite difference approximations of the tensor of second derivatives. Pass it the DoF handler object that describes the finite element field, a nodal value vector, and receive the cell-wise spectral norm of the approximated tensor of second derivatives. The spectral norm is the matrix norm associated to the $l_2$ vector norm.

    The last parameter denotes the solution component, for which the gradient is to be computed. It defaults to the first component. For scalar elements, this is the only valid choice; for vector-valued ones, any component between zero and the number of vector components can be given here.

    In a parallel computation the solution vector needs to contain the locally relevant unknowns.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDifferentiation_1_1SD.html differs (JavaScript source, ASCII text, with very long lines (690)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 2023-11-25 15:26:00.346547474 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 2023-11-25 15:26:00.346547474 +0100 @@ -666,7 +666,7 @@
    -

    Return a symbolic number that represents the Euler constant $e \approx 2.71828$ raised to the given exponent.

    +

    Return a symbolic number that represents the Euler constant $e \approx 2.71828$ raised to the given exponent.

    Mimics the function std::exp(exponent) using the standard math library.

    Definition at line 60 of file symengine_math.cc.

    @@ -2894,7 +2894,7 @@

    Return an Expression representing a scalar symbolic variable with the identifier specified by symbol.

    -

    For example, if the symbol is the string "x" then the scalar symbolic variable that is returned represents the scalar $x$.

    +

    For example, if the symbol is the string "x" then the scalar symbolic variable that is returned represents the scalar $x$.

    Parameters
    @@ -2933,7 +2933,7 @@
    [in]symbolAn identifier (or name) for the returned symbolic variable.

    Return an Expression representing a scalar symbolic function with the identifier specified by symbol. The function's symbolic dependencies are specified by the input arguments.

    -

    For example, if the symbol is the string "f", and the arguments to the function that is generated are the symbolic variable x and the symbolic expression y+z, then the generic symbolic function that is returned represents $f(x, y+z)$.

    +

    For example, if the symbol is the string "f", and the arguments to the function that is generated are the symbolic variable x and the symbolic expression y+z, then the generic symbolic function that is returned represents $f(x, y+z)$.

    Parameters
    @@ -2971,7 +2971,7 @@
    [in]symbolAn identifier (or name) for the returned symbolic function.

    Return an Expression representing a scalar symbolic function with the identifier specified by symbol. The function's symbolic dependencies are specified by the keys to the input arguments map; the values stored in the map are ignored.

    -

    For example, if the symbol is the string "f", and the arguments to the function that is generated are the symbolic variable x and the symbolic expression y+z, then the generic symbolic function that is returned represents $f(x, y+z)$.

    +

    For example, if the symbol is the string "f", and the arguments to the function that is generated are the symbolic variable x and the symbolic expression y+z, then the generic symbolic function that is returned represents $f(x, y+z)$.

    Parameters
    @@ -3015,7 +3015,7 @@
    [in]symbolAn identifier (or name) for the returned symbolic function.
    -
    Returns
    The symbolic function or expression representing the result $\frac{\partial f}{\partial x}$.
    +
    Returns
    The symbolic function or expression representing the result $\frac{\partial f}{\partial x}$.

    Definition at line 70 of file symengine_scalar_operations.cc.

    @@ -4270,7 +4270,7 @@

    Return a substitution map that has any explicit interdependencies between the entries of the input substitution_map resolved.

    The force_cyclic_dependency_resolution flag exists to ensure, if desired, that no cyclic dependencies can exist in the returned map. If a cyclic dependency exists in the input substitution map, substitution_map, then with this flag set to true the dependency cycle is broken by a dictionary-ordered substitution. For example, if the substitution map contains two entries map["a"] -> "b" and map["b"] -> "a", then the result of calling this function would be a map with the elements map["a"] -> "a" and map["b"] -> "a".

    If one symbol is an explicit function of another, and it is desired that all their values are completely resolved, then it may be necessary to perform substitution a number of times before the result is finalized. This function performs substitution sweeps for a set of symbolic variables until all explicit relationships between the symbols in the map have been resolved. Whether each entry returns a symbolic or real value depends on the nature of the values stored in the substitution map. If the values associated with a key are also symbolic then the returned result may still be symbolic in nature. The terminal result of using the input substitution map, symbol_values, is then guaranteed to be rendered by a single substitution of the returned dependency-resolved map.

    -

    Example: If map["a"] -> 1 and map["b"] -> "a"+ 2, then the function $f(a,b(a)) = a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is determined upon the completion of the first sweep. A second sweep is therefore necessary to resolve the final symbol, and the returned value is ultimately $f = [3+a]_{a=1} = 4$. By resolving the explicit relationships between all symbols in the map, we determine that map["a"] -> 1 and map["b"] -> 1 + 2 = 3 and thus, using only one substitution, that $f = a+b = 1 + 3 = 4$.

    +

    Example: If map["a"] -> 1 and map["b"] -> "a"+ 2, then the function $f(a,b(a)) = a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is determined upon the completion of the first sweep. A second sweep is therefore necessary to resolve the final symbol, and the returned value is ultimately $f = [3+a]_{a=1} = 4$. By resolving the explicit relationships between all symbols in the map, we determine that map["a"] -> 1 and map["b"] -> 1 + 2 = 3 and thus, using only one substitution, that $f = a+b = 1 + 3 = 4$.

    @@ -4345,11 +4345,11 @@ If the symbols stored in the map are explicitly dependent on one another, then the returned result depends on the order in which the map is traversed. It is recommended to first resolve all interdependencies in the map using the resolve_explicit_dependencies() function.

    Examples:

    1. -

      If map["a"] == 1 and map["b"] == "a" + 2, then the function $f(a,b(a)) := a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is returned. This return is because the symbol "a" is substituted throughout the function first, and only then is the symbol "b(a)" substituted, by which time its explicit dependency on "a" cannot be resolved.

      +

      If map["a"] == 1 and map["b"] == "a" + 2, then the function $f(a,b(a)) := a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is returned. This return is because the symbol "a" is substituted throughout the function first, and only then is the symbol "b(a)" substituted, by which time its explicit dependency on "a" cannot be resolved.

    2. -If map["a"] == "b"+2 and map["b"] == 1, then the function $f(a(b),b): = a+b$ will be evaluated and the result $f\vert_{a=b+2, b} = [b+2+b]_{b=1} = 4$ is returned. This is because the explicitly dependent symbol "a(b)" is substituted first followed by the symbol "b".
    3. +If map["a"] == "b"+2 and map["b"] == 1, then the function $f(a(b),b): = a+b$ will be evaluated and the result $f\vert_{a=b+2, b} = [b+2+b]_{b=1} = 4$ is returned. This is because the explicitly dependent symbol "a(b)" is substituted first followed by the symbol "b".
    @@ -4531,7 +4531,7 @@

    Return a vector of Expressions representing a vectorial symbolic variable with the identifier specified by symbol.

    -

    For example, if the symbol is the string "v" then the vectorial symbolic variable that is returned represents the vector $v$. Each component of $v$ is prefixed by the given symbol, and has a suffix that indicates its component index.

    +

    For example, if the symbol is the string "v" then the vectorial symbolic variable that is returned represents the vector $v$. Each component of $v$ is prefixed by the given symbol, and has a suffix that indicates its component index.

    Template Parameters
    @@ -4568,7 +4568,7 @@
    dimThe dimension of the returned tensor.

    Return a tensor of Expressions representing a tensorial symbolic variable with the identifier specified by symbol.

    -

    For example, if the symbol is the string "T" then the tensorial symbolic variable that is returned represents the vector $T$. Each component of $T$ is prefixed by the given symbol, and has a suffix that indicates its component indices.

    +

    For example, if the symbol is the string "T" then the tensorial symbolic variable that is returned represents the vector $T$. Each component of $T$ is prefixed by the given symbol, and has a suffix that indicates its component indices.

    Template Parameters
    @@ -4797,7 +4797,7 @@
    rankThe rank of the returned tensor.
    -
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
    +
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
    @@ -4834,7 +4834,7 @@ -
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    +
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    @@ -4871,7 +4871,7 @@ -
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
    +
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
    @@ -4908,7 +4908,7 @@ -
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    +
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    @@ -4945,7 +4945,7 @@ -
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    +
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    @@ -4982,7 +4982,7 @@ -
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    +
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    @@ -5019,7 +5019,7 @@ -
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    +
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    @@ -5056,7 +5056,7 @@ -
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    +
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    @@ -5093,8 +5093,8 @@ -
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}_{1}}{\partial
-\mathbf{T}_{2}}$.
    +
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}_{1}}{\partial
+\mathbf{T}_{2}}$.
    @@ -5131,8 +5131,8 @@ -
    Returns
    The symmetric tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{S}_{1}}{\partial
-\mathbf{S}_{2}}$.
    +
    Returns
    The symmetric tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{S}_{1}}{\partial
+\mathbf{S}_{2}}$.
    @@ -5169,7 +5169,7 @@ -
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}}{\partial \mathbf{S}}$.
    +
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}}{\partial \mathbf{S}}$.
    @@ -5206,7 +5206,7 @@ -
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{S}}{\partial \mathbf{T}}$.
    +
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{S}}{\partial \mathbf{T}}$.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDoFRenumbering.html differs (JavaScript source, ASCII text, with very long lines (1206)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDoFRenumbering.html 2023-11-25 15:26:00.373213599 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDoFRenumbering.html 2023-11-25 15:26:00.373213599 +0100 @@ -226,13 +226,13 @@

    Using the constraint information usually leads to reductions in bandwidth of 10 or 20 per cent, but may for some very unstructured grids also lead to an increase. You have to weigh the decrease in your case with the time spent to use the constraint information, which usually is several times longer than the ‘pure’ renumbering algorithm.

    In almost all cases, the renumbering scheme finds a corner to start with. Since there is more than one corner in most grids and since even an interior degree of freedom may be a better starting point, giving the starting point by the user may be a viable way if you have a simple scheme to derive a suitable point (e.g. by successively taking the third child of the cell top left of the coarsest level, taking its third vertex and the dof index thereof, if you want the top left corner vertex). If you do not know beforehand what your grid will look like (e.g. when using adaptive algorithms), searching a best starting point may be difficult, however, and in many cases will not justify the effort.

    Component-wise and block-wise numberings

    -

    For finite elements composed of several base elements using the FESystem class, or for elements which provide several components themselves, it may be of interest to sort the DoF indices by component. This will then bring out the block matrix structure, since otherwise the degrees of freedom are numbered cell-wise without taking into account that they may belong to different components. For example, one may want to sort degree of freedom for a Stokes discretization so that we first get all velocities and then all the pressures so that the resulting matrix naturally decomposes into a $2\times 2$ system.

    +

    For finite elements composed of several base elements using the FESystem class, or for elements which provide several components themselves, it may be of interest to sort the DoF indices by component. This will then bring out the block matrix structure, since otherwise the degrees of freedom are numbered cell-wise without taking into account that they may belong to different components. For example, one may want to sort degree of freedom for a Stokes discretization so that we first get all velocities and then all the pressures so that the resulting matrix naturally decomposes into a $2\times 2$ system.

    This kind of numbering may be obtained by calling the component_wise() function of this class. Since it does not touch the order of indices within each component, it may be worthwhile to first renumber using the Cuthill- McKee or a similar algorithm and afterwards renumbering component-wise. This will bring out the matrix structure and additionally have a good numbering within each block.

    The component_wise() function allows not only to honor enumeration based on vector components, but also allows to group together vector components into "blocks" using a defaulted argument to the various DoFRenumbering::component_wise() functions (see GlossComponent vs GlossBlock for a description of the difference). The blocks designated through this argument may, but do not have to be, equal to the blocks that the finite element reports. For example, a typical Stokes element would be

    FESystem<dim> stokes_fe (FE_Q<dim>(2), dim, // dim velocities
    FE_Q<dim>(1), 1); // one pressure
    Definition: fe_system.h:209
    Definition: fe_q.h:551
    -

    This element has dim+1 vector components and equally many blocks. However, one may want to consider the velocities as one logical block so that all velocity degrees of freedom are enumerated the same way, independent of whether they are $x$- or $y$-velocities. This is done, for example, in step-20 and step-22 as well as several other tutorial programs.

    +

    This element has dim+1 vector components and equally many blocks. However, one may want to consider the velocities as one logical block so that all velocity degrees of freedom are enumerated the same way, independent of whether they are $x$- or $y$-velocities. This is done, for example, in step-20 and step-22 as well as several other tutorial programs.

    On the other hand, if you really want to use block structure reported by the finite element itself (a case that is often the case if you have finite elements that have multiple vector components, e.g. the FE_RaviartThomas or FE_Nedelec elements) then you can use the DoFRenumbering::block_wise instead of the DoFRenumbering::component_wise functions.

    Cell-wise numbering

    Given an ordered vector of cells, the function cell_wise() sorts the degrees of freedom such that degrees on earlier cells of this vector will occur before degrees on later cells.

    @@ -245,7 +245,7 @@

    The MatrixFree class provides optimized algorithms for interleaving operations on vectors before and after the access of the vector data in the respective loops. The algorithm matrix_free_data_locality() makes sure that all unknowns with a short distance between the first and last access are grouped together, in order to increase the spatial data locality.

    A comparison of reordering strategies

    As a benchmark of comparison, let us consider what the different sparsity patterns produced by the various algorithms when using the $Q_2^d\times
-Q_1$ element combination typically employed in the discretization of Stokes equations, when used on the mesh obtained in step-22 after one adaptive mesh refinement in 3d. The space dimension together with the coupled finite element leads to a rather dense system matrix with, on average around 180 nonzero entries per row. After applying each of the reordering strategies shown below, the degrees of freedom are also sorted using DoFRenumbering::component_wise into velocity and pressure groups; this produces the $2\times 2$ block structure seen below with the large velocity-velocity block at top left, small pressure-pressure block at bottom right, and coupling blocks at top right and bottom left.

    +Q_1$" src="form_951.png"/> element combination typically employed in the discretization of Stokes equations, when used on the mesh obtained in step-22 after one adaptive mesh refinement in 3d. The space dimension together with the coupled finite element leads to a rather dense system matrix with, on average around 180 nonzero entries per row. After applying each of the reordering strategies shown below, the degrees of freedom are also sorted using DoFRenumbering::component_wise into velocity and pressure groups; this produces the $2\times 2$ block structure seen below with the large velocity-velocity block at top left, small pressure-pressure block at bottom right, and coupling blocks at top right and bottom left.

    The goal of reordering strategies is to improve the preconditioner. In step-22 we use a SparseILU to preconditioner for the velocity-velocity block at the top left. The quality of the preconditioner can then be measured by the number of CG iterations required to solve a linear system with this block. For some of the reordering strategies below we record this number for adaptive refinement cycle 3, with 93176 degrees of freedom; because we solve several linear systems with the same matrix in the Schur complement, the average number of iterations is reported. The lower the number the better the preconditioner and consequently the better the renumbering of degrees of freedom is suited for this task. We also state the run-time of the program, in part determined by the number of iterations needed, for the first 4 cycles on one of our machines. Note that the reported times correspond to the run time of the entire program, not just the affected solver; if a program runs twice as fast with one particular ordering than with another one, then this means that the actual solver is actually several times faster.

    @@ -473,7 +473,7 @@
    const std::vector< unsigned int > &&#href_anchor"paramname">target_component = std::vector<unsigned int>()&#href_anchor"memdoc"> -

    Sort the degrees of freedom by vector component. The numbering within each component is not touched, so a degree of freedom with index $i$, belonging to some component, and another degree of freedom with index $j$ belonging to the same component will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    +

    Sort the degrees of freedom by vector component. The numbering within each component is not touched, so a degree of freedom with index $i$, belonging to some component, and another degree of freedom with index $j$ belonging to the same component will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    You can specify that the components are ordered in a different way than suggested by the FESystem object you use. To this end, set up the vector target_component such that the entry at index i denotes the number of the target component for dofs with component i in the FESystem. Naming the same target component more than once is possible and results in a blocking of several components into one. This is discussed in step-22. If you omit this argument, the same order as given by the finite element is used.

    If one of the base finite elements from which the global finite element under consideration here, is a non-primitive one, i.e. its shape functions have more than one non-zero component, then it is not possible to associate these degrees of freedom with a single vector component. In this case, they are associated with the first vector component to which they belong.

    For finite elements with only one component, or a single non-primitive base element, this function is the identity operation.

    @@ -575,7 +575,7 @@
    -

    Sort the degrees of freedom by vector block. The numbering within each block is not touched, so a degree of freedom with index $i$, belonging to some block, and another degree of freedom with index $j$ belonging to the same block will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    +

    Sort the degrees of freedom by vector block. The numbering within each block is not touched, so a degree of freedom with index $i$, belonging to some block, and another degree of freedom with index $j$ belonging to the same block will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    Note
    This function only succeeds if each of the elements in the hp::FECollection attached to the DoFHandler argument has exactly the same number of blocks (see the glossary for more information). Note that this is not always given: while the hp::FECollection class ensures that all of its elements have the same number of vector components, they need not have the same number of blocks. At the same time, this function here needs to match individual blocks across elements and therefore requires that elements have the same number of blocks and that subsequent blocks in one element have the same meaning as in another element.

    Definition at line 999 of file dof_renumbering.cc.

    @@ -679,7 +679,7 @@
  • For meshes based on parallel::distributed::Triangulation, the locally owned cells of each MPI process are contiguous in Z order. That means that numbering degrees of freedom by visiting cells in Z order yields locally owned DoF indices that consist of contiguous ranges for each process. This is also true for the default ordering of DoFs on such triangulations, but the default ordering creates an enumeration that also depends on how many processors participate in the mesh, whereas the one generated by this function enumerates the degrees of freedom on a particular cell with indices that will be the same regardless of how many processes the mesh is split up between.
  • For meshes based on parallel::shared::Triangulation, the situation is more complex. Here, the set of locally owned cells is determined by a partitioning algorithm (selected by passing an object of type parallel::shared::Triangulation::Settings to the constructor of the triangulation), and in general these partitioning algorithms may assign cells to subdomains based on decisions that may have nothing to do with the Z order. (Though it is possible to select these flags in a way so that partitioning uses the Z order.) As a consequence, the cells of one subdomain are not contiguous in Z order, and if one renumbered degrees of freedom based on the Z order of cells, one would generally end up with DoF indices that on each processor do not form a contiguous range. This is often inconvenient (for example, because PETSc cannot store vectors and matrices for which the locally owned set of indices is not contiguous), and consequently this function uses the following algorithm for parallel::shared::Triangulation objects:

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDoFTools.html differs (JavaScript source, ASCII text, with very long lines (1381)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDoFTools.html 2023-11-25 15:26:00.409879516 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceDoFTools.html 2023-11-25 15:26:00.406546253 +0100 @@ -306,7 +306,7 @@

    (As a side note, for corner cases: The question what a degree of freedom on the boundary is, is not so easy. It should really be a degree of freedom of which the respective basis function has nonzero values on the boundary. At least for Lagrange elements this definition is equal to the statement that the off-point, or what deal.II calls support_point, of the shape function, i.e. the point where the function assumes its nominal value (for Lagrange elements this is the point where it has the function value 1), is located on the boundary. We do not check this directly, the criterion is rather defined through the information the finite element class gives: the FiniteElement class defines the numbers of basis functions per vertex, per line, and so on and the basis functions are numbered after this information; a basis function is to be considered to be on the face of a cell (and thus on the boundary if the cell is at the boundary) according to it belonging to a vertex, line, etc but not to the interior of the cell. The finite element uses the same cell-wise numbering so that we can say that if a degree of freedom was numbered as one of the dofs on lines, we assume that it is located on the line. Where the off-point actually is, is a secret of the finite element (well, you can ask it, but we don't do it here) and not relevant in this context.)

    Setting up sparsity patterns for boundary matrices

    In some cases, one wants to only work with DoFs that sit on the boundary. One application is, for example, if rather than interpolating non- homogeneous boundary values, one would like to project them. For this, we need two things: a way to identify nodes that are located on (parts of) the boundary, and a way to build matrices out of only degrees of freedom that are on the boundary (i.e. much smaller matrices, in which we do not even build the large zero block that stems from the fact that most degrees of freedom have no support on the boundary of the domain). The first of these tasks is done by the map_dof_to_boundary_indices() function (described above).

    -

    The second part requires us first to build a sparsity pattern for the couplings between boundary nodes, and then to actually build the components of this matrix. While actually computing the entries of these small boundary matrices is discussed in the MatrixCreator namespace, the creation of the sparsity pattern is done by the create_boundary_sparsity_pattern() function. For its work, it needs to have a numbering of all those degrees of freedom that are on those parts of the boundary that we are interested in. You can get this from the map_dof_to_boundary_indices() function. It then builds the sparsity pattern corresponding to integrals like $\int_\Gamma \varphi_{b2d(i)} \varphi_{b2d(j)} dx$, where $i$ and $j$ are indices into the matrix, and $b2d(i)$ is the global DoF number of a degree of freedom sitting on a boundary (i.e., $b2d$ is the inverse of the mapping returned by map_dof_to_boundary_indices() function).

    +

    The second part requires us first to build a sparsity pattern for the couplings between boundary nodes, and then to actually build the components of this matrix. While actually computing the entries of these small boundary matrices is discussed in the MatrixCreator namespace, the creation of the sparsity pattern is done by the create_boundary_sparsity_pattern() function. For its work, it needs to have a numbering of all those degrees of freedom that are on those parts of the boundary that we are interested in. You can get this from the map_dof_to_boundary_indices() function. It then builds the sparsity pattern corresponding to integrals like $\int_\Gamma \varphi_{b2d(i)} \varphi_{b2d(j)} dx$, where $i$ and $j$ are indices into the matrix, and $b2d(i)$ is the global DoF number of a degree of freedom sitting on a boundary (i.e., $b2d$ is the inverse of the mapping returned by map_dof_to_boundary_indices() function).

    Enumeration Type Documentation

    ◆ Coupling

    @@ -512,7 +512,7 @@

    Otherwise, if face_1 and face_2 are not active faces, this function loops recursively over the children of face_1 and face_2. If only one of the two faces is active, then we recursively iterate over the children of the non-active ones and make sure that the solution function on the refined side equals that on the non-refined face in much the same way as we enforce hanging node constraints at places where differently refined cells come together. (However, unlike hanging nodes, we do not enforce the requirement that there be only a difference of one refinement level between the two sides of the domain you would like to be periodic).

    This routine only constrains DoFs that are not already constrained. If this routine encounters a DoF that already is constrained (for instance by Dirichlet boundary conditions), the old setting of the constraint (dofs the entry is constrained to, inhomogeneities) is kept and nothing happens.

    The flags in the component_mask (see GlossComponentMask) denote which components of the finite element space shall be constrained with periodic boundary conditions. If it is left as specified by the default value all components are constrained. If it is different from the default value, it is assumed that the number of entries equals the number of components of the finite element. This can be used to enforce periodicity in only one variable in a system of equations.

    -

    face_orientation, face_flip and face_rotation describe an orientation that should be applied to face_1 prior to matching and constraining DoFs. This has nothing to do with the actual orientation of the given faces in their respective cells (which for boundary faces is always the default) but instead how you want to see periodicity to be enforced. For example, by using these flags, you can enforce a condition of the kind $u(0,y)=u(1,1-y)$ (i.e., a Moebius band) or in 3d a twisted torus. More precisely, these flags match local face DoF indices in the following manner:

    +

    face_orientation, face_flip and face_rotation describe an orientation that should be applied to face_1 prior to matching and constraining DoFs. This has nothing to do with the actual orientation of the given faces in their respective cells (which for boundary faces is always the default) but instead how you want to see periodicity to be enforced. For example, by using these flags, you can enforce a condition of the kind $u(0,y)=u(1,1-y)$ (i.e., a Moebius band) or in 3d a twisted torus. More precisely, these flags match local face DoF indices in the following manner:

    In 2d: face_orientation must always be true, face_rotation is always false, and face_flip has the meaning of line_flip; this implies e.g. for Q1:

    face_orientation = true, face_flip = false, face_rotation = false:
    @@ -585,7 +585,7 @@
    and any combination of that...

    Optionally a matrix matrix along with a std::vector first_vector_components can be specified that describes how DoFs on face_1 should be modified prior to constraining to the DoFs of face_2. Here, two declarations are possible: If the std::vector first_vector_components is non empty the matrix is interpreted as a dim $\times$ dim rotation matrix that is applied to all vector valued blocks listed in first_vector_components of the FESystem. If first_vector_components is empty the matrix is interpreted as an interpolation matrix with size no_face_dofs $\times$ no_face_dofs.

    This function makes sure that identity constraints don't create cycles in constraints.

    -

    periodicity_factor can be used to implement Bloch periodic conditions (a.k.a. phase shift periodic conditions) of the form $\psi(\mathbf{r})=e^{-i\mathbf{k}\cdot\mathbf{r}}u(\mathbf{r})$ where $u$ is periodic with the same periodicity as the crystal lattice and $\mathbf{k}$ is the wavevector, see https://en.wikipedia.org/wiki/Bloch_wave. The solution at face_2 is equal to the solution at face_1 times periodicity_factor. For example, if the solution at face_1 is $\psi(0)$ and $\mathbf{d}$ is the corresponding point on face_2, then the solution at face_2 should be $\psi(d) = \psi(0)e^{-i \mathbf{k}\cdot \mathbf{d}}$. This condition can be implemented using $\mathrm{periodicity\_factor}=e^{-i \mathbf{k}\cdot \mathbf{d}}$.

    +

    periodicity_factor can be used to implement Bloch periodic conditions (a.k.a. phase shift periodic conditions) of the form $\psi(\mathbf{r})=e^{-i\mathbf{k}\cdot\mathbf{r}}u(\mathbf{r})$ where $u$ is periodic with the same periodicity as the crystal lattice and $\mathbf{k}$ is the wavevector, see https://en.wikipedia.org/wiki/Bloch_wave. The solution at face_2 is equal to the solution at face_1 times periodicity_factor. For example, if the solution at face_1 is $\psi(0)$ and $\mathbf{d}$ is the corresponding point on face_2, then the solution at face_2 should be $\psi(d) = \psi(0)e^{-i \mathbf{k}\cdot \mathbf{d}}$. This condition can be implemented using $\mathrm{periodicity\_factor}=e^{-i \mathbf{k}\cdot \mathbf{d}}$.

    Detailed information can be found in the see Glossary entry on periodic boundary conditions.

    Definition at line 2292 of file dof_tools_constraints.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFESeries.html differs (JavaScript source, ASCII text, with very long lines (867)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFESeries.html 2023-11-25 15:26:00.419879313 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFESeries.html 2023-11-25 15:26:00.419879313 +0100 @@ -177,7 +177,7 @@
    -

    Linear regression least-square fit of $y = k \, x + b$. The size of the input vectors should be equal and more than 1. The returned pair will contain $k$ (first) and $b$ (second).

    +

    Linear regression least-square fit of $y = k \, x + b$. The size of the input vectors should be equal and more than 1. The returned pair will contain $k$ (first) and $b$ (second).

    Definition at line 30 of file fe_series.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFETools.html differs (JavaScript source, ASCII text, with very long lines (2085)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFETools.html 2023-11-25 15:26:00.439878906 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFETools.html 2023-11-25 15:26:00.439878906 +0100 @@ -186,7 +186,7 @@ void&#href_anchor"memTemplItemRight" valign="bottom">extrapolate (const DoFHandler< dim, spacedim > &dof1, const InVector &z1, const DoFHandler< dim, spacedim > &dof2, const AffineConstraints< typename OutVector::value_type > &constraints, OutVector &z2) &#href_anchor"details" id="details">

    Detailed Description

    This namespace offers interpolations and extrapolations of discrete functions of one FiniteElement fe1 to another FiniteElement fe2.

    -

    It also provides the local interpolation matrices that interpolate on each cell. Furthermore it provides the difference matrix $id-I_h$ that is needed for evaluating $(id-I_h)z$ for e.g. the dual solution $z$.

    +

    It also provides the local interpolation matrices that interpolate on each cell. Furthermore it provides the difference matrix $id-I_h$ that is needed for evaluating $(id-I_h)z$ for e.g. the dual solution $z$.

    For more information about the spacedim template parameter check the documentation of FiniteElement or the one of Triangulation.

    Function Documentation

    @@ -370,7 +370,7 @@

    Compute the identity matrix minus the back interpolation matrix. The difference_matrix will be of size (fe1.n_dofs_per_cell(), fe1.n_dofs_per_cell()) after this function. Previous content of the argument will be overwritten.

    -

    This function computes the matrix that transforms a fe1 function $z$ to $z-I_hz$ where $I_h$ denotes the interpolation operator from the fe1 space to the fe2 space. This matrix hence is useful to evaluate error-representations where $z$ denotes the dual solution.

    +

    This function computes the matrix that transforms a fe1 function $z$ to $z-I_hz$ where $I_h$ denotes the interpolation operator from the fe1 space to the fe2 space. This matrix hence is useful to evaluate error-representations where $z$ denotes the dual solution.

    @@ -404,7 +404,7 @@
    -

    Compute the local $L^2$-projection matrix from fe1 to fe2.

    +

    Compute the local $L^2$-projection matrix from fe1 to fe2.

    @@ -425,22 +425,22 @@

    This is a rather specialized function used during the construction of finite element objects. It is used to build the basis of shape functions for an element, given a set of polynomials and interpolation points. The function is only implemented for finite elements with exactly dim vector components. In particular, this applies to classes derived from the FE_PolyTensor class.

    -

    Specifically, the purpose of this function is as follows: FE_PolyTensor receives, from its derived classes, an argument that describes a polynomial space. This space may be parameterized in terms of monomials, or in some other way, but is in general not in the form that we use for finite elements where we typically want to use a basis that is derived from some kind of node functional (e.g., the interpolation at specific points). Concretely, assume that the basis used by the polynomial space is $\{\tilde\varphi_j(\mathbf x)\}_{j=1}^N$, and that the node functionals of the finite element are $\{\Psi_i\}_{i=1}^N$. We then want to compute a basis $\{\varphi_j(\mathbf x)\}_{j=1}^N$ for the finite element space so that $\Psi_i[\varphi_j] = \delta_{ij}$. To do this, we can set $\varphi_j(\mathbf x) = \sum_{k=1}^N c_{jk} \tilde\varphi_k(\mathbf x)$ where we need to determine the expansion coefficients $c_{jk}$. We do this by applying $\Psi_i$ to both sides of the equation, to obtain

    -\begin{align*}
+<p>Specifically, the purpose of this function is as follows: <a class=FE_PolyTensor receives, from its derived classes, an argument that describes a polynomial space. This space may be parameterized in terms of monomials, or in some other way, but is in general not in the form that we use for finite elements where we typically want to use a basis that is derived from some kind of node functional (e.g., the interpolation at specific points). Concretely, assume that the basis used by the polynomial space is $\{\tilde\varphi_j(\mathbf x)\}_{j=1}^N$, and that the node functionals of the finite element are $\{\Psi_i\}_{i=1}^N$. We then want to compute a basis $\{\varphi_j(\mathbf x)\}_{j=1}^N$ for the finite element space so that $\Psi_i[\varphi_j] = \delta_{ij}$. To do this, we can set $\varphi_j(\mathbf x) = \sum_{k=1}^N c_{jk} \tilde\varphi_k(\mathbf x)$ where we need to determine the expansion coefficients $c_{jk}$. We do this by applying $\Psi_i$ to both sides of the equation, to obtain

    +\begin{align*}
   \Psi_i [\varphi_j] = \sum_{k=1}^N c_{jk} \Psi_i[\tilde\varphi_k],
-\end{align*} +\end{align*}" src="form_1215.png"/>

    -

    and we know that the left hand side equals $\delta_{ij}$. If you think of this as a system of $N\times N$ equations for the elements of a matrix on the left and on the right, then this can be written as

    -\begin{align*}
+<p> and we know that the left hand side equals <picture><source srcset=$\delta_{ij}$. If you think of this as a system of $N\times N$ equations for the elements of a matrix on the left and on the right, then this can be written as

    +\begin{align*}
   I = C X^T
-\end{align*} +\end{align*}" src="form_1217.png"/>

    -

    where $C$ is the matrix of coefficients $c_{jk}$ and $X_{ik} = \Psi_i[\tilde\varphi_k]$. Consequently, in order to compute the expansion coefficients $C=X^{-T}$, we need to apply the node functionals to all functions of the "raw" basis of the polynomial space.

    -

    Until the finite element receives this matrix $X$ back, it describes its shape functions (e.g., in FiniteElement::shape_value()) in the form $\tilde\varphi_j$. After it calls this function, it has the expansion coefficients and can describe its shape functions as $\varphi_j$.

    -

    This function therefore computes this matrix $X$, for the following specific circumstances:

    @@ -1071,7 +1071,7 @@
    -

    Compute $(Id-I_h)z_1$ for a given dof1-function $z_1$, where $I_h$ is the interpolation from fe1 to fe2. The result $(Id-I_h)z_1$ is written into z1_difference.

    +

    Compute $(Id-I_h)z_1$ for a given dof1-function $z_1$, where $I_h$ is the interpolation from fe1 to fe2. The result $(Id-I_h)z_1$ is written into z1_difference.

    Note, that this function does not work for continuous elements at hanging nodes. For that case use the interpolation_difference function, below, that takes an additional AffineConstraints object.

    @@ -1123,7 +1123,7 @@
    -

    Compute $(Id-I_h)z_1$ for a given dof1-function $z_1$, where $I_h$ is the interpolation from fe1 to fe2. The result $(Id-I_h)z_1$ is written into z1_difference. constraints1 and constraints2 are the hanging node constraints corresponding to dof1 and dof2, respectively. These objects are particular important when continuous elements on grids with hanging nodes (locally refined grids) are involved.

    +

    Compute $(Id-I_h)z_1$ for a given dof1-function $z_1$, where $I_h$ is the interpolation from fe1 to fe2. The result $(Id-I_h)z_1$ is written into z1_difference. constraints1 and constraints2 are the hanging node constraints corresponding to dof1 and dof2, respectively. These objects are particular important when continuous elements on grids with hanging nodes (locally refined grids) are involved.

    For parallel computations, supply z1 with ghost elements and z1_difference without ghost elements.

    @@ -1165,7 +1165,7 @@
    -

    $L^2$ projection for discontinuous elements. Operates the same direction as interpolate.

    +

    $L^2$ projection for discontinuous elements. Operates the same direction as interpolate.

    The global projection can be computed by local matrices if the finite element spaces are discontinuous. With continuous elements, this is impossible, since a global mass matrix must be inverted.

    @@ -1214,7 +1214,7 @@
  • It then performs a loop over all non-active cells of dof2. If such a non-active cell has at least one active child, then we call the children of this cell a "patch". We then interpolate from the children of this patch to the patch, using the finite element space associated with dof2 and immediately interpolate back to the children. In essence, this information throws away all information in the solution vector that lives on a scale smaller than the patch cell.
  • Since we traverse non-active cells from the coarsest to the finest levels, we may find patches that correspond to child cells of previously treated patches if the mesh had been refined adaptively (this cannot happen if the mesh has been refined globally because there the children of a patch are all active). We also perform the operation described above on these patches, but it is easy to see that on patches that are children of previously treated patches, the operation is now the identity operation (since it interpolates from the children of the current patch a function that had previously been interpolated to these children from an even coarser patch). Consequently, this does not alter the solution vector any more.
  • -

    The name of the function originates from the fact that it can be used to construct a representation of a function of higher polynomial degree on a once coarser mesh. For example, if you imagine that you start with a $Q_1$ function on a globally refined mesh, and that dof2 is associated with a $Q_2$ element, then this function computes the equivalent of the operator $I_{2h}^{(2)}$ interpolating the original piecewise linear function onto a quadratic function on a once coarser mesh with mesh size $2h$ (but representing this function on the original mesh with size $h$). If the exact solution is sufficiently smooth, then $u^\ast=I_{2h}^{(2)}u_h$ is typically a better approximation to the exact solution $u$ of the PDE than $u_h$ is. In other words, this function provides a postprocessing step that improves the solution in a similar way one often obtains by extrapolating a sequence of solutions, explaining the origin of the function's name.

    +

    The name of the function originates from the fact that it can be used to construct a representation of a function of higher polynomial degree on a once coarser mesh. For example, if you imagine that you start with a $Q_1$ function on a globally refined mesh, and that dof2 is associated with a $Q_2$ element, then this function computes the equivalent of the operator $I_{2h}^{(2)}$ interpolating the original piecewise linear function onto a quadratic function on a once coarser mesh with mesh size $2h$ (but representing this function on the original mesh with size $h$). If the exact solution is sufficiently smooth, then $u^\ast=I_{2h}^{(2)}u_h$ is typically a better approximation to the exact solution $u$ of the PDE than $u_h$ is. In other words, this function provides a postprocessing step that improves the solution in a similar way one often obtains by extrapolating a sequence of solutions, explaining the origin of the function's name.

    Note
    The resulting field does not satisfy continuity requirements of the given finite elements if the algorithm outlined above is used. When you use continuous elements on grids with hanging nodes, please use the extrapolate function with an additional AffineConstraints argument, see below.
    Since this function operates on patches of cells, it requires that the underlying grid is refined at least once for every coarse grid cell. If this is not the case, an exception will be raised.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFunctionTools.html differs (JavaScript source, ASCII text, with very long lines (650)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFunctionTools.html 2023-11-25 15:26:00.453211970 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceFunctionTools.html 2023-11-25 15:26:00.453211970 +0100 @@ -138,13 +138,13 @@
    -

    Estimate bounds on the value and bounds on each gradient component of a Function, $f$, over a BoundingBox, by approximating it by a 2nd order Taylor polynomial starting from the box center.

    -

    Each lower and upper bound is returned as a std::pair<double, double>, such that the first entry is the lower bound, $L$, and the second is the upper bound, $U$, i.e. $f(x) \in [L, U]$.

    +

    Estimate bounds on the value and bounds on each gradient component of a Function, $f$, over a BoundingBox, by approximating it by a 2nd order Taylor polynomial starting from the box center.

    +

    Each lower and upper bound is returned as a std::pair<double, double>, such that the first entry is the lower bound, $L$, and the second is the upper bound, $U$, i.e. $f(x) \in [L, U]$.

    The function value, gradient, and Hessian are computed at the box center. The bounds on the value of the function are then estimated as

    -

    $f(x) \in [f(x_c) - F, f(x_c) + F]$, where $F = \sum_i |\partial_i f(x_c)| h_i
-   + 1/2 \sum_i \sum_j |\partial_i \partial_j f(x_c)| h_i h_j$.

    -

    Here, $h_i$ is half the side length of the box in the $i$th coordinate direction, which is the distance we extrapolate. The bounds on the gradient components are estimated similarly as

    -

    $\partial_i f \in [\partial_i f(x_c) - G_i, \partial_i f(x_c) + G_i]$, where $G_i = \sum_j |\partial_i \partial_j f(x_c)| h_j$.

    +

    $f(x) \in [f(x_c) - F, f(x_c) + F]$, where $F = \sum_i |\partial_i f(x_c)| h_i
+   + 1/2 \sum_i \sum_j |\partial_i \partial_j f(x_c)| h_i h_j$.

    +

    Here, $h_i$ is half the side length of the box in the $i$th coordinate direction, which is the distance we extrapolate. The bounds on the gradient components are estimated similarly as

    +

    $\partial_i f \in [\partial_i f(x_c) - G_i, \partial_i f(x_c) + G_i]$, where $G_i = \sum_j |\partial_i \partial_j f(x_c)| h_j$.

    If the function has more than 1 component the component parameter can be used to specify which function component the bounds should be computed for.

    Definition at line 26 of file function_tools.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGeometricUtilities_1_1Coordinates.html differs (JavaScript source, ASCII text, with very long lines (669)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGeometricUtilities_1_1Coordinates.html 2023-11-25 15:26:00.463211767 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGeometricUtilities_1_1Coordinates.html 2023-11-25 15:26:00.463211767 +0100 @@ -128,13 +128,13 @@
    -

    Return spherical coordinates of a Cartesian point point. The returned array is filled with radius, azimuth angle $\in [0,2 \pi)$ and polar/inclination angle $ \in [0,\pi]$ (omitted in 2d).

    +

    Return spherical coordinates of a Cartesian point point. The returned array is filled with radius, azimuth angle $\in [0,2 \pi)$ and polar/inclination angle $ \in [0,\pi]$ (omitted in 2d).

    In 3d the transformation is given by

    -\begin{align*}
+<picture><source srcset=\begin{align*}
  r &= \sqrt{x^2+y^2+z^2} \\
  \theta &= {\rm atan}(y/x) \\
  \phi &= {\rm acos} (z/r)
-\end{align*} +\end{align*}" src="form_546.png"/>

    The use of this function is demonstrated in step-75.

    @@ -158,13 +158,13 @@
    -

    Return the Cartesian coordinates of a spherical point defined by scoord which is filled with radius $r \in [0,\infty)$, azimuth angle $\theta \in [0,2 \pi)$ and polar/inclination angle $\phi \in [0,\pi]$ (omitted in 2d).

    +

    Return the Cartesian coordinates of a spherical point defined by scoord which is filled with radius $r \in [0,\infty)$, azimuth angle $\theta \in [0,2 \pi)$ and polar/inclination angle $\phi \in [0,\pi]$ (omitted in 2d).

    In 3d the transformation is given by

    -\begin{align*}
+<picture><source srcset=\begin{align*}
  x &= r\, \cos(\theta) \, \sin(\phi) \\
  y &= r\, \sin(\theta) \, \sin(\phi) \\
  z &= r\, \cos(\phi)
-\end{align*} +\end{align*}" src="form_550.png"/>

    Definition at line 77 of file geometric_utilities.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGraphColoring.html differs (JavaScript source, ASCII text, with very long lines (1206)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGraphColoring.html 2023-11-25 15:26:00.473211563 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGraphColoring.html 2023-11-25 15:26:00.473211563 +0100 @@ -141,9 +141,9 @@

    Create a partitioning of the given range of iterators so that iterators that point to conflicting objects will be placed into different partitions, where the question whether two objects conflict is determined by a user-provided function.

    This function can also be considered as a graph coloring: each object pointed to by an iterator is considered to be a node and there is an edge between each two nodes that conflict. The graph coloring algorithm then assigns a color to each node in such a way that two nodes connected by an edge do not have the same color.

    A typical use case for this function is in assembling a matrix in parallel. There, one would like to assemble local contributions on different cells at the same time (an operation that is purely local and so requires no synchronization) but then we need to add these local contributions to the global matrix. In general, the contributions from different cells may be to the same matrix entries if the cells share degrees of freedom and, consequently, can not happen at the same time unless we want to risk a race condition (see http://en.wikipedia.org/wiki/Race_condition). Thus, we call these two cells in conflict, and we can only allow operations in parallel from cells that do not conflict. In other words, two cells are in conflict if the set of matrix entries (for example characterized by the rows) have a nonempty intersection.

    -

    In this generality, computing the graph of conflicts would require calling a function that determines whether two iterators (or the two objects they represent) conflict, and calling it for every pair of iterators, i.e., $\frac 12 N (N-1)$ times. This is too expensive in general. A better approach is to require a user-defined function that returns for every iterator it is called for a set of indicators of some kind that characterize a conflict; two iterators are in conflict if their conflict indicator sets have a nonempty intersection. In the example of assembling a matrix, the conflict indicator set would contain the indices of all degrees of freedom on the cell pointed to (in the case of continuous Galerkin methods) or the union of indices of degree of freedom on the current cell and all cells adjacent to the faces of the current cell (in the case of discontinuous Galerkin methods, because there one computes face integrals coupling the degrees of freedom connected by a common face – see step-12).

    +

    In this generality, computing the graph of conflicts would require calling a function that determines whether two iterators (or the two objects they represent) conflict, and calling it for every pair of iterators, i.e., $\frac 12 N (N-1)$ times. This is too expensive in general. A better approach is to require a user-defined function that returns for every iterator it is called for a set of indicators of some kind that characterize a conflict; two iterators are in conflict if their conflict indicator sets have a nonempty intersection. In the example of assembling a matrix, the conflict indicator set would contain the indices of all degrees of freedom on the cell pointed to (in the case of continuous Galerkin methods) or the union of indices of degree of freedom on the current cell and all cells adjacent to the faces of the current cell (in the case of discontinuous Galerkin methods, because there one computes face integrals coupling the degrees of freedom connected by a common face – see step-12).

    Note
    The conflict set returned by the user defined function passed as third argument needs to accurately describe all degrees of freedom for which anything is written into the matrix or right hand side. In other words, if the writing happens through a function like AffineConstraints::copy_local_to_global(), then the set of conflict indices must actually contain not only the degrees of freedom on the current cell, but also those they are linked to by constraints such as hanging nodes.
    -

    In other situations, the conflict indicator sets may represent something different altogether – it is up to the caller of this function to describe what it means for two iterators to conflict. Given this, computing conflict graph edges can be done significantly more cheaply than with ${\cal O}(N^2)$ operations.

    +

    In other situations, the conflict indicator sets may represent something different altogether – it is up to the caller of this function to describe what it means for two iterators to conflict. Given this, computing conflict graph edges can be done significantly more cheaply than with ${\cal O}(N^2)$ operations.

    In any case, the result of the function will be so that iterators whose conflict indicator sets have overlap will not be assigned to the same color.

    Note
    The algorithm used in this function is described in a paper by Turcksin, Kronbichler and Bangerth, see workstream_paper.
    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridGenerator.html differs (JavaScript source, ASCII text, with very long lines (1193)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridGenerator.html 2023-11-25 15:26:00.509877484 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridGenerator.html 2023-11-25 15:26:00.509877484 +0100 @@ -303,7 +303,7 @@
    -

    Initialize the given triangulation with a hypercube (line in 1d, square in 2d, etc) consisting of exactly one cell. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

    +

    Initialize the given triangulation with a hypercube (line in 1d, square in 2d, etc) consisting of exactly one cell. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

    If the argument colorize is false, then all boundary indicators are set to zero (the default boundary indicator) for 2d and 3d. If it is true, the boundary is colorized as in hyper_rectangle(). In 1d the indicators are always colorized, see hyper_rectangle().

    @@ -339,7 +339,7 @@
    -

    Create a $d$-simplex (i.e., a triangle in 2d, or a tetrahedron in 3d) with $d+1$ corners. Since deal.II does not support triangular and tetrahedral cells, the simplex described by the input arguments is subdivided into quadrilaterals and hexahedra by adding edge, face, and simplex midpoints, resulting in a mesh that consists of $d+1$ quadrilateral or hexahedral cells.

    +

    Create a $d$-simplex (i.e., a triangle in 2d, or a tetrahedron in 3d) with $d+1$ corners. Since deal.II does not support triangular and tetrahedral cells, the simplex described by the input arguments is subdivided into quadrilaterals and hexahedra by adding edge, face, and simplex midpoints, resulting in a mesh that consists of $d+1$ quadrilateral or hexahedral cells.

    The vertices argument contains a vector with all d+1 vertices defining the corners of the simplex. They must be given in an order such that the vectors from the first vertex to each of the others form a right-handed system.

    The meshes generated in two and three dimensions are:

    @@ -832,7 +832,7 @@
    -

    Generate a grid consisting of a channel with a cylinder. This is a common benchmark for Navier-Stokes solvers. The geometry consists of a channel of size $[0, 2.2] \times [0, 0.41] \times [0, 0.41] $ (where the $z$ dimension is omitted in 2d) with a cylinder, parallel to the $z$ axis with diameter $0.1$, centered at $(0.2, 0.2, 0)$. The channel has three distinct regions:

      +

      Generate a grid consisting of a channel with a cylinder. This is a common benchmark for Navier-Stokes solvers. The geometry consists of a channel of size $[0, 2.2] \times [0, 0.41] \times [0, 0.41] $ (where the $z$ dimension is omitted in 2d) with a cylinder, parallel to the $z$ axis with diameter $0.1$, centered at $(0.2, 0.2, 0)$. The channel has three distinct regions:

      1. If n_shells is greater than zero, then there are that many shells centered around the cylinder,
      2. @@ -848,18 +848,18 @@
        -

        The resulting Triangulation uses three manifolds: a PolarManifold (in 2d) or CylindricalManifold (in 3d) with manifold id $0$, a TransfiniteInterpolationManifold with manifold id $1$, and a FlatManifold everywhere else. For more information on this topic see the glossary entry on manifold indicators. The cell faces on the cylinder and surrounding shells have manifold ids of $0$, while the cell volumes adjacent to the shells (or, if they do not exist, the cylinder) have a manifold id of $1$. Put another way: this grid uses TransfiniteInterpolationManifold to smoothly transition from the shells (generated with GridGenerator::concentric_hyper_shells) to the bulk region. All other cell volumes and faces have manifold id numbers::flat_manifold_id and use FlatManifold. All cells with id numbers::flat_manifold_id are rectangular prisms aligned with the coordinate axes.

        -

        The picture below shows part of the 2d grid (using all default arguments to this function) after two global refinements. The cells with manifold id $0$ are orange (the polar manifold id), cells with manifold id $1$ are yellow (the transfinite interpolation manifold id), and the ones with manifold id numbers::flat_manifold_id are cyan:

        +

        The resulting Triangulation uses three manifolds: a PolarManifold (in 2d) or CylindricalManifold (in 3d) with manifold id $0$, a TransfiniteInterpolationManifold with manifold id $1$, and a FlatManifold everywhere else. For more information on this topic see the glossary entry on manifold indicators. The cell faces on the cylinder and surrounding shells have manifold ids of $0$, while the cell volumes adjacent to the shells (or, if they do not exist, the cylinder) have a manifold id of $1$. Put another way: this grid uses TransfiniteInterpolationManifold to smoothly transition from the shells (generated with GridGenerator::concentric_hyper_shells) to the bulk region. All other cell volumes and faces have manifold id numbers::flat_manifold_id and use FlatManifold. All cells with id numbers::flat_manifold_id are rectangular prisms aligned with the coordinate axes.

        +

        The picture below shows part of the 2d grid (using all default arguments to this function) after two global refinements. The cells with manifold id $0$ are orange (the polar manifold id), cells with manifold id $1$ are yellow (the transfinite interpolation manifold id), and the ones with manifold id numbers::flat_manifold_id are cyan:

        Parameters
        - + - +
        triaTriangulation to be created. Must be empty upon calling this function.
        shell_region_widthWidth of the layer of shells around the cylinder. This value should be between $0$ and $0.05$; the default value is $0.03$.
        shell_region_widthWidth of the layer of shells around the cylinder. This value should be between $0$ and $0.05$; the default value is $0.03$.
        n_shellsNumber of shells to use in the shell layer.
        skewnessParameter controlling how close the shells are to the cylinder: see the mathematical definition given in GridGenerator::concentric_hyper_shells.
        colorizeIf true, then assign different boundary ids to different parts of the boundary. For more information on boundary indicators see this glossary entry. The left boundary (at $x = 0$) is assigned an id of $0$, the right boundary (at $x = 2.2$) is assigned an id of $1$; the boundary of the obstacle in the middle (i.e., the circle in 2d or the cylinder walls in 3d) is assigned an id of $2$, and the channel walls are assigned an id of $3$.
        colorizeIf true, then assign different boundary ids to different parts of the boundary. For more information on boundary indicators see this glossary entry. The left boundary (at $x = 0$) is assigned an id of $0$, the right boundary (at $x = 2.2$) is assigned an id of $1$; the boundary of the obstacle in the middle (i.e., the circle in 2d or the cylinder walls in 3d) is assigned an id of $2$, and the channel walls are assigned an id of $3$.
        @@ -1227,7 +1227,7 @@

        This function is declared to exist for triangulations of all space dimensions, but throws an error if called in 1d.

        By default, the manifold_id is set to 0 on the boundary faces, 1 on the boundary cells, and numbers::flat_manifold_id on the central cell and on internal faces.

        A SphericalManifold is attached by default to the boundary faces for correct placement of boundary vertices upon refinement and to be able to use higher order mappings. However, it turns out that this strategy may not be the optimal one to create a good a mesh for a hyperball. The "Possibilities for extensions" section of step-6 has an extensive discussion of how one would construct better meshes and what one needs to do for it. Setting the argument attach_spherical_manifold_on_boundary_cells to true attaches a SphericalManifold manifold also to the cells adjacent to the boundary, and not only to the boundary faces.

        -
        Note
        Since this is likely one of the earliest functions users typically consider to create meshes with curved boundaries, let us also comment on one aspect that is often confusing: Namely, that what one sees is not always what is actually happening. Specifically, if you output the coarse mesh with a function such as GridOut::write_vtk() using default options, then one doesn't generally get to see curved faces at the boundary. That's because most file formats by default only store vertex locations, with the implicit understanding that cells are composed from these vertices and bounded by straight edges. At the same time, the fact that this function attaches a SphericalManifold object to the boundary faces means that at least internally, edges really are curved. If you want to see them that way, you need to make sure that the function you use to output the mesh actually plots boundary faces as curved lines rather than straight lines characterized by only the locations of the two end points. For example, GridOut::write_gnuplot() can do that if you set the corresponding flag in the GridOutFlags::Gnuplot structure. It is, however, an entirely separate consideration whether you are actually computing on curved cells. In typical finite element computations, one has to compute integrals and these are computed by transforming back actual cells using a mapping to the reference cell. What mapping is used determines what shape the cells have for these internal computations: For example, with the widely used $Q_1$ mapping (implicitly used in step-6), integration always happens on cells that are assumed to have straight boundaries described by only the vertex locations. In other words, if such a mapping is used, then the cells of the domain really do have straight edges, regardless of the manifold description attached to these edges and regardless of the flags given when generating output. 
As a consequence of all of this, it is important to distinguish three things: (i) the manifold description attached to an object in the mesh; (ii) the mapping used in integration; and (iii) the style used in outputting graphical information about the mesh. All of these can be chosen more or less independently of each other, and what you see visualized is not necessarily exactly what is happening.
        +
        Note
        Since this is likely one of the earliest functions users typically consider to create meshes with curved boundaries, let us also comment on one aspect that is often confusing: Namely, that what one sees is not always what is actually happening. Specifically, if you output the coarse mesh with a function such as GridOut::write_vtk() using default options, then one doesn't generally get to see curved faces at the boundary. That's because most file formats by default only store vertex locations, with the implicit understanding that cells are composed from these vertices and bounded by straight edges. At the same time, the fact that this function attaches a SphericalManifold object to the boundary faces means that at least internally, edges really are curved. If you want to see them that way, you need to make sure that the function you use to output the mesh actually plots boundary faces as curved lines rather than straight lines characterized by only the locations of the two end points. For example, GridOut::write_gnuplot() can do that if you set the corresponding flag in the GridOutFlags::Gnuplot structure. It is, however, an entirely separate consideration whether you are actually computing on curved cells. In typical finite element computations, one has to compute integrals and these are computed by transforming back actual cells using a mapping to the reference cell. What mapping is used determines what shape the cells have for these internal computations: For example, with the widely used $Q_1$ mapping (implicitly used in step-6), integration always happens on cells that are assumed to have straight boundaries described by only the vertex locations. In other words, if such a mapping is used, then the cells of the domain really do have straight edges, regardless of the manifold description attached to these edges and regardless of the flags given when generating output. 
As a consequence of all of this, it is important to distinguish three things: (i) the manifold description attached to an object in the mesh; (ii) the mapping used in integration; and (iii) the style used in outputting graphical information about the mesh. All of these can be chosen more or less independently of each other, and what you see visualized is not necessarily exactly what is happening.
        Precondition
        The triangulation passed as argument needs to be empty when calling this function.
    @@ -1529,7 +1529,7 @@
    -

    Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius.

    +

    Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius.

    The boundaries are colored according to the following scheme: 0 for the hull of the cylinder, 1 for the left hand face and 2 for the right hand face (see the glossary entry on colorization).

    The manifold id for the hull of the cylinder is set to zero, and a CylindricalManifold is attached to it.

    Precondition
    The triangulation passed as argument needs to be empty when calling this function.
    @@ -1573,7 +1573,7 @@
    -

    Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius. This function is only implemented for dim==3.

    +

    Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius. This function is only implemented for dim==3.

    The boundaries are colored according to the following scheme: 0 for the hull of the cylinder, 1 for the left hand face and 2 for the right hand face (see the glossary entry on colorization).

    The manifold id for the hull of the cylinder is set to zero, and a CylindricalManifold is attached to it.

    @@ -1712,13 +1712,13 @@ Point Radius -Openings $(2,0,0)$ $1$ +Openings $(2,0,0)$ $1$ -$(0,2,0)$ $1$ +$(0,2,0)$ $1$ -$(0,0,2)$ $1$ +$(0,0,2)$ $1$ -Bifurcation $(0,0,0)$ $1$ +Bifurcation $(0,0,0)$ $1$
    @@ -1731,13 +1731,13 @@ Point Radius -Openings $(-2,0,0)$ $1$ +Openings $(-2,0,0)$ $1$ -$(0,2,0)$ $1$ +$(0,2,0)$ $1$ -$(2,0,0)$ $1$ +$(2,0,0)$ $1$ -Bifurcation $(0,0,0)$ $1$ +Bifurcation $(0,0,0)$ $1$
    @@ -1750,13 +1750,13 @@ Point Radius -Openings $(-2,0,0)$ $1$ +Openings $(-2,0,0)$ $1$ -$(1,\sqrt{3},0)$ $1$ +$(1,\sqrt{3},0)$ $1$ -$(1,-\sqrt{3},0)$ $1$ +$(1,-\sqrt{3},0)$ $1$ -Bifurcation $(0,0,0)$ $1$ +Bifurcation $(0,0,0)$ $1$

    Definition at line 267 of file grid_generator_pipe_junction.cc.

    @@ -1799,7 +1799,7 @@
    Parameters
    - +
    triaA Triangulation object which has to be empty.
    sizesA vector of integers of dimension GeometryInfo<dim>::faces_per_cell with the following meaning: the legs of the cross are stacked on the faces of the center cell, in the usual order of deal.II cells, namely first $-x$, then $x$, then $-y$ and so on. The corresponding entries in sizes name the number of cells stacked on this face. All numbers may be zero, thus L- and T-shaped domains are specializations of this domain.
    sizesA vector of integers of dimension GeometryInfo<dim>::faces_per_cell with the following meaning: the legs of the cross are stacked on the faces of the center cell, in the usual order of deal.II cells, namely first $-x$, then $x$, then $-y$ and so on. The corresponding entries in sizes name the number of cells stacked on this face. All numbers may be zero, thus L- and T-shaped domains are specializations of this domain.
    colorize_cellsIf colorization is enabled, then the material id of a cells corresponds to the leg it is in. The id of the center cell is zero, and then the legs are numbered starting at one (see the glossary entry on colorization).
    @@ -2024,7 +2024,7 @@
  • 96 for the rhombic dodecahedron refined once. This choice dates from an older version of deal.II before the Manifold classes were implemented: today this choce is equivalent to the rhombic dodecahedron after performing one global refinement.
  • -Numbers of the kind $192\times 2^m$ with $m\geq 0$ integer. This choice is similar to the 24 and 48 cell cases, but provides additional refinements in azimuthal direction combined with a single layer in radial direction. The base mesh is either the 6 or 12 cell version, depending on whether $m$ in the power is odd or even, respectively.
  • +Numbers of the kind $192\times 2^m$ with $m\geq 0$ integer. This choice is similar to the 24 and 48 cell cases, but provides additional refinements in azimuthal direction combined with a single layer in radial direction. The base mesh is either the 6 or 12 cell version, depending on whether $m$ in the power is odd or even, respectively.

    The versions with 24, 48, and $2^m 192$ cells are useful if the shell is thin and the radial lengths should be made more similar to the circumferential lengths.

    The 3d grids with 12 and 96 cells are plotted below:

    @@ -2260,7 +2260,7 @@
    -

    Produce a domain that is the space between two cylinders in 3d, with given length, inner and outer radius and a given number of elements. The cylinder shell is built around the $z$-axis with the two faces located at $z = 0$ and $z = $ length.

    +

    Produce a domain that is the space between two cylinders in 3d, with given length, inner and outer radius and a given number of elements. The cylinder shell is built around the $z$-axis with the two faces located at $z = 0$ and $z = $ length.

    If n_radial_cells is zero (as is the default), then it is computed adaptively such that the resulting elements have the least aspect ratio. The same holds for n_axial_cells.

    Note
    Although this function is declared as a template, it does not make sense in 1d and 2d. Also keep in mind that this object is rotated and positioned differently than the one created by cylinder().

    All manifold ids are set to zero, and a CylindricalManifold is attached to the triangulation.

    @@ -2315,9 +2315,9 @@
    -

    Produce the volume or surface mesh of a torus. The axis of the torus is the $y$-axis while the plane of the torus is the $x$- $z$ plane.

    +

    Produce the volume or surface mesh of a torus. The axis of the torus is the $y$-axis while the plane of the torus is the $x$- $z$ plane.

    If dim is 3, the mesh will be the volume of the torus, using a mesh equivalent to the circle in the poloidal coordinates with 5 cells on the cross section. This function attaches a TorusManifold to all boundary faces which are marked with a manifold id of 1, a CylindricalManifold to the interior cells and all their faces which are marked with a manifold id of 2 (representing a flat state within the poloidal coordinates), and a TransfiniteInterpolationManifold to the cells between the TorusManifold on the surface and the ToroidalManifold in the center, with cells marked with manifold id 0.

    -

    An example for the case if dim is 3 with a cut through the domain at $z=0$, 6 toroidal cells, $R=2$ and $r=0.5$ without any global refinement is given here:

    +

    An example for the case if dim is 3 with a cut through the domain at $z=0$, 6 toroidal cells, $R=2$ and $r=0.5$ without any global refinement is given here:

    @@ -2329,7 +2329,7 @@ RThe radius of the circle, which forms the middle line of the torus containing the loop of cells. Must be greater than r. rThe inner radius of the torus. n_cells_toroidalOptional argument to set the number of cell layers in toroidal direction. The default is 6 cell layers. - phiOptional argument to generate an open torus with angle $0 < \varphi <= 2 \pi$. The default value is $2 \pi$, in which case a closed torus is generated. If the torus is open, the torus is cut at two planes perpendicular to the torus centerline. The center of these two planes are located at $(x_1, y_1, z_1) = (R, 0, 0)$ and $(x_2, y_2, z_2) = (R \cos(\varphi), 0, R \sin(\varphi))$. + phiOptional argument to generate an open torus with angle $0 < \varphi <= 2 \pi$. The default value is $2 \pi$, in which case a closed torus is generated. If the torus is open, the torus is cut at two planes perpendicular to the torus centerline. The center of these two planes are located at $(x_1, y_1, z_1) = (R, 0, 0)$ and $(x_2, y_2, z_2) = (R \cos(\varphi), 0, R \sin(\varphi))$. @@ -2385,8 +2385,8 @@
    -

    This function produces a square in the xy-plane with a cylindrical hole in the middle. The square and the circle are centered at the origin. In 3d, this geometry is extruded in $z$ direction to the interval $[0,L]$.

    -

    The inner boundary has a manifold id of $0$ and a boundary id of $6$. This function attaches a PolarManifold or CylindricalManifold to the interior boundary in 2d and 3d respectively. The other faces have boundary ids of $0, 1, 2, 3, 4$, or $5$ given in the standard order of faces in 2d or 3d.

    +

    This function produces a square in the xy-plane with a cylindrical hole in the middle. The square and the circle are centered at the origin. In 3d, this geometry is extruded in $z$ direction to the interval $[0,L]$.

    +

    The inner boundary has a manifold id of $0$ and a boundary id of $6$. This function attaches a PolarManifold or CylindricalManifold to the interior boundary in 2d and 3d respectively. The other faces have boundary ids of $0, 1, 2, 3, 4$, or $5$ given in the standard order of faces in 2d or 3d.

    @@ -2464,17 +2464,17 @@

    Produce a grid consisting of concentric shells. The primary difference between this function and GridGenerator::hyper_shell() is that this function permits unevenly spaced (in the radial direction) coarse level cells.

    -

    The parameters center, inner_radius, and outer_radius behave in the same way as the first three arguments to GridGenerator::hyper_shell. n_shells gives the total number of shells to use (i.e., the number of cells in the radial direction). The outer radius of the $k$th shell is given by

    +

    The parameters center, inner_radius, and outer_radius behave in the same way as the first three arguments to GridGenerator::hyper_shell. n_shells gives the total number of shells to use (i.e., the number of cells in the radial direction). The outer radius of the $k$th shell is given by

    -\[
/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridRefinement.html differs (JavaScript source, ASCII text, with very long lines (1406))
--- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridRefinement.html	2023-11-25 15:26:00.523210545 +0100
+++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridRefinement.html	2023-11-25 15:26:00.523210545 +0100
@@ -222,7 +222,7 @@
 <p class=

    -

    As an example, with no coarsening, setting top_fraction_of_cells to 1/3 will result in approximately doubling the number of cells in two dimensions. That is because each of these 1/3 of cells will be replaced by its four children, resulting in $4\times \frac 13 N$ cells, whereas the remaining 2/3 of cells remains untouched – thus yielding a total of $4\times \frac 13 N + \frac 23 N = 2N$ cells. The same effect in three dimensions is achieved by refining 1/7th of the cells. These values are therefore frequently used because they ensure that the cost of computations on subsequent meshes become expensive sufficiently quickly that the fraction of time spent on the coarse meshes is not too large. On the other hand, the fractions are small enough that mesh adaptation does not refine too many cells in each step.

    +

    As an example, with no coarsening, setting top_fraction_of_cells to 1/3 will result in approximately doubling the number of cells in two dimensions. That is because each of these 1/3 of cells will be replaced by its four children, resulting in $4\times \frac 13 N$ cells, whereas the remaining 2/3 of cells remains untouched – thus yielding a total of $4\times \frac 13 N + \frac 23 N = 2N$ cells. The same effect in three dimensions is achieved by refining 1/7th of the cells. These values are therefore frequently used because they ensure that the cost of computations on subsequent meshes become expensive sufficiently quickly that the fraction of time spent on the coarse meshes is not too large. On the other hand, the fractions are small enough that mesh adaptation does not refine too many cells in each step.

    Note
    This function only sets the coarsening and refinement flags. The mesh is not changed until you call Triangulation::execute_coarsening_and_refinement().
    Parameters
    @@ -290,14 +290,14 @@

    This function provides a strategy to mark cells for refinement and coarsening with the goal of controlling the reduction of the error estimate.

    Also known as the bulk criterion or Dörfler marking, this function computes the thresholds for refinement and coarsening such that the criteria of cells getting flagged for refinement make up for a certain fraction of the total error. We explain its operation for refinement, coarsening works analogously.

    Let cK be the criterion of cell K. Then the total error estimate is computed by the formula

    -\[
+<picture><source srcset=\[
 E = \sum_{K\in \cal T} c_K.
-\] +\]" src="form_1368.png"/>

    -

    If 0 < a < 1 is top_fraction, then we refine the smallest subset $\cal M$ of the Triangulation $\cal T$ such that

    -\[
+<p>If <em> 0 < a < 1</em> is <code>top_fraction</code>, then we refine the smallest subset <picture><source srcset=$\cal M$ of the Triangulation $\cal T$ such that

    +\[
 a E \le \sum_{K\in \cal M} c_K
-\] +\]" src="form_1371.png"/>

    The algorithm is performed by the greedy algorithm described in refine_and_coarsen_fixed_number().

    Note
    The often used formula with squares on the left and right is recovered by actually storing the square of cK in the vector criteria.
    @@ -348,32 +348,32 @@
    -

    This function flags cells of a triangulation for refinement with the aim to reach a grid that is optimal with respect to an objective function that tries to balance reducing the error and increasing the numerical cost when the mesh is refined. Specifically, this function makes the assumption that if you refine a cell $K$ with error indicator $\eta_K$ provided by the second argument to this function, then the error on the children (for all children together) will only be $2^{-\text{order}}\eta_K$ where order is the third argument of this function. This makes the assumption that the error is only a local property on a mesh and can be reduced by local refinement – an assumption that is true for the interpolation operator, but not for the usual Galerkin projection, although it is approximately true for elliptic problems where the Greens function decays quickly and the error here is not too much affected by a too coarse mesh somewhere else.

    -

    With this, we can define the objective function this function tries to optimize. Let us assume that the mesh currently has $N_0$ cells. Then, if we refine the $m$ cells with the largest errors, we expect to get (in $d$ space dimensions)

    -\[
+<p>This function flags cells of a triangulation for refinement with the aim to reach a grid that is optimal with respect to an objective function that tries to balance reducing the error and increasing the numerical cost when the mesh is refined. Specifically, this function makes the assumption that if you refine a cell <picture><source srcset=$K$ with error indicator $\eta_K$ provided by the second argument to this function, then the error on the children (for all children together) will only be $2^{-\text{order}}\eta_K$ where order is the third argument of this function. This makes the assumption that the error is only a local property on a mesh and can be reduced by local refinement – an assumption that is true for the interpolation operator, but not for the usual Galerkin projection, although it is approximately true for elliptic problems where the Greens function decays quickly and the error here is not too much affected by a too coarse mesh somewhere else.

    +

    With this, we can define the objective function this function tries to optimize. Let us assume that the mesh currently has $N_0$ cells. Then, if we refine the $m$ cells with the largest errors, we expect to get (in $d$ space dimensions)

    +\[
   N(m) = (N_0-m) + 2^d m = N_0 + (2^d-1)m
-\] +\]" src="form_1375.png"/>

    -

    cells ( $N_0-m$ are not refined, and each of the $m$ cells we refine yield $2^d$ child cells. On the other hand, with refining $m$ cells, and using the assumptions above, we expect that the error will be

    -\[
+<p> cells ( <picture><source srcset=$N_0-m$ are not refined, and each of the $m$ cells we refine yield $2^d$ child cells. On the other hand, with refining $m$ cells, and using the assumptions above, we expect that the error will be

    +\[
   \eta^\text{exp}(m)
   =
   \sum_{K, K\; \text{will not be refined}} \eta_K
   +
   \sum_{K, K\; \text{will be refined}} 2^{-\text{order}}\eta_K
-\] +\]" src="form_1378.png"/>

    -

    where the first sum extends over $N_0-m$ cells and the second over the $m$ cells that will be refined. Note that $N(m)$ is an increasing function of $m$ whereas $\eta^\text{exp}(m)$ is a decreasing function.

    -

    This function then tries to find that number $m$ of cells to mark for refinement for which the objective function

    -\[
+<p> where the first sum extends over <picture><source srcset=$N_0-m$ cells and the second over the $m$ cells that will be refined. Note that $N(m)$ is an increasing function of $m$ whereas $\eta^\text{exp}(m)$ is a decreasing function.

    +

    This function then tries to find that number $m$ of cells to mark for refinement for which the objective function

    +\[
   J(m) = N(m)^{\text{order}/d} \eta^\text{exp}(m)
-\] +\]" src="form_1381.png"/>

    is minimal.

    The rationale for this function is two-fold. First, compared to the refine_and_coarsen_fixed_fraction() and refine_and_coarsen_fixed_number() functions, this function has the property that if all refinement indicators are the same (i.e., we have achieved a mesh where the error per cell is equilibrated), then the entire mesh is refined. This is based on the observation that a mesh with equilibrated error indicators is the optimal mesh (i.e., has the least overall error) among all meshes with the same number of cells. (For proofs of this, see R. Becker, M. Braack, R. Rannacher: "Numerical simulation of laminar flames at low Mach number with adaptive finite elements", Combustion Theory and Modelling, Vol. 3, Nr. 3, p. 503-534 1999; and W. Bangerth, R. Rannacher: "Adaptive Finite Element Methods for Differential Equations", Birkhauser, 2003.)

    -

    Second, the function uses the observation that ideally, the error behaves like $e \approx c N^{-\alpha}$ with some constant $\alpha$ that depends on the dimension and the finite element degree. It should - given optimal mesh refinement - not depend so much on the regularity of the solution, as it is based on the idea, that all singularities can be resolved by refinement. Mesh refinement is then based on the idea that we want to make $c=e N^\alpha$ small. This corresponds to the functional $J(m)$ above.

    +

    Second, the function uses the observation that ideally, the error behaves like $e \approx c N^{-\alpha}$ with some constant $\alpha$ that depends on the dimension and the finite element degree. It should - given optimal mesh refinement - not depend so much on the regularity of the solution, as it is based on the idea, that all singularities can be resolved by refinement. Mesh refinement is then based on the idea that we want to make $c=e N^\alpha$ small. This corresponds to the functional $J(m)$ above.

    Note
    This function was originally implemented by Thomas Richter. It follows a strategy described in [Richter2005]. See in particular Section 4.3, pp. 42-43.

    Definition at line 448 of file grid_refinement.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridTools.html differs (JavaScript source, ASCII text, with very long lines (1031)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridTools.html 2023-11-25 15:26:00.573209526 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceGridTools.html 2023-11-25 15:26:00.573209526 +0100 @@ -508,8 +508,8 @@
    -

    Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1
-\; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature. This version of the function uses a linear mapping to compute the JxW values on each cell.

    +

    Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1
+\; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature. This version of the function uses a linear mapping to compute the JxW values on each cell.

    If the triangulation is a dim-dimensional one embedded in a higher dimensional space of dimension spacedim, then the value returned is the dim-dimensional measure. For example, for a two-dimensional triangulation in three-dimensional space, the value returned is the area of the surface so described. (This obviously makes sense since the spacedim-dimensional measure of a dim-dimensional triangulation would always be zero if dim < spacedim).

    This function also works for objects of type parallel::distributed::Triangulation, in which case the function is a collective operation.

    Parameters
    @@ -549,8 +549,8 @@
    -

    Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1
-\; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature for which we use the mapping argument.

    +

    Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1
+\; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature for which we use the mapping argument.

    If the triangulation is a dim-dimensional one embedded in a higher dimensional space of dimension spacedim, then the value returned is the dim-dimensional measure. For example, for a two-dimensional triangulation in three-dimensional space, the value returned is the area of the surface so described. (This obviously makes sense since the spacedim-dimensional measure of a dim-dimensional triangulation would always be zero if dim < spacedim.

    This function also works for objects of type parallel::distributed::Triangulation, in which case the function is a collective operation.

    Parameters
    @@ -707,8 +707,8 @@
    -

    This function computes an affine approximation of the map from the unit coordinates to the real coordinates of the form $p_\text{real} = A
-p_\text{unit} + b $ by a least squares fit of this affine function to the $2^\text{dim}$ vertices representing a quadrilateral or hexahedral cell in spacedim dimensions. The result is returned as a pair with the matrix A as the first argument and the vector b describing distance of the plane to the origin.

    +

    This function computes an affine approximation of the map from the unit coordinates to the real coordinates of the form $p_\text{real} = A
+p_\text{unit} + b $ by a least squares fit of this affine function to the $2^\text{dim}$ vertices representing a quadrilateral or hexahedral cell in spacedim dimensions. The result is returned as a pair with the matrix A as the first argument and the vector b describing distance of the plane to the origin.

    For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping, even in cases where the actual transformation by a bi-/trilinear or higher order mapping might be singular. The result is exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

    This approximation is underlying the function TriaAccessor::real_to_unit_cell_affine_approximation() function.

    For exact transformations to the unit cell, use Mapping::transform_real_to_unit_cell().

    @@ -747,7 +747,7 @@
    -

    Computes an aspect ratio measure for all locally-owned active cells and fills a vector with one entry per cell, given a triangulation and mapping. The size of the vector that is returned equals the number of active cells. The vector contains zero for non locally-owned cells. The aspect ratio of a cell is defined as the ratio of the maximum to minimum singular value of the Jacobian, taking the maximum over all quadrature points of a quadrature rule specified via quadrature. For example, for the special case of rectangular elements in 2d with dimensions $a$ and $b$ ( $a \geq b$), this function returns the usual aspect ratio definition $a/b$. The above definition using singular values is a generalization to arbitrarily deformed elements. This function is intended to be used for $d=2,3$ space dimensions, but it can also be used for $d=1$ returning a value of 1.

    +

    Computes an aspect ratio measure for all locally-owned active cells and fills a vector with one entry per cell, given a triangulation and mapping. The size of the vector that is returned equals the number of active cells. The vector contains zero for non locally-owned cells. The aspect ratio of a cell is defined as the ratio of the maximum to minimum singular value of the Jacobian, taking the maximum over all quadrature points of a quadrature rule specified via quadrature. For example, for the special case of rectangular elements in 2d with dimensions $a$ and $b$ ( $a \geq b$), this function returns the usual aspect ratio definition $a/b$. The above definition using singular values is a generalization to arbitrarily deformed elements. This function is intended to be used for $d=2,3$ space dimensions, but it can also be used for $d=1$ returning a value of 1.

    Note
    Inverted elements do not throw an exception. Instead, a value of inf is written into the vector in case of inverted elements.
    Make sure to use enough quadrature points for a precise calculation of the aspect ratio in case of deformed elements.
    @@ -954,7 +954,7 @@

    Remove vertices that are duplicated, due to the input of a structured grid, for example. If these vertices are not removed, the faces bounded by these vertices become part of the boundary, even if they are in the interior of the mesh.

    -

    This function is called by some GridIn::read_* functions. Only the vertices with indices in considered_vertices are tested for equality. This speeds up the algorithm, which is, for worst-case hyper cube geometries $O(N^{3/2})$ in 2d and $O(N^{5/3})$ in 3d: quite slow. However, if you wish to consider all vertices, simply pass an empty vector. In that case, the function fills considered_vertices with all vertices.

    +

    This function is called by some GridIn::read_* functions. Only the vertices with indices in considered_vertices are tested for equality. This speeds up the algorithm, which is, for worst-case hyper cube geometries $O(N^{3/2})$ in 2d and $O(N^{5/3})$ in 3d: quite slow. However, if you wish to consider all vertices, simply pass an empty vector. In that case, the function fills considered_vertices with all vertices.

    Two vertices are considered equal if their difference in each coordinate direction is less than tol. This implies that nothing happens if the tolerance is set to zero.

    Definition at line 761 of file grid_tools.cc.

    @@ -1122,7 +1122,7 @@

    Transform the vertices of the given triangulation by applying the function object provided as first argument to all its vertices.

    -

    The transformation given as argument is used to transform each vertex. Its respective type has to offer a function-like syntax, i.e. the predicate is either an object of a type that has an operator(), or it is a pointer to a non-member function, or it is a lambda function object. In either case, argument and return value have to be of type Point<spacedim>. An example – a simple transformation that moves the object two units to the right in the $x_1$ direction – could look like as follows:

    +

    The transformation given as argument is used to transform each vertex. Its respective type has to offer a function-like syntax, i.e. the predicate is either an object of a type that has an operator(), or it is a pointer to a non-member function, or it is a lambda function object. In either case, argument and return value have to be of type Point<spacedim>. An example – a simple transformation that moves the object two units to the right in the $x_1$ direction – could look like as follows:

    ... // fill triangulation with something
    {
    @@ -1344,13 +1344,13 @@

    Transform the given triangulation smoothly to a different domain where, typically, each of the vertices at the boundary of the triangulation is mapped to the corresponding points in the new_points map.

    -

    The unknown displacement field $u_d(\mathbf x)$ in direction $d$ is obtained from the minimization problem

    -\[ \min\, \int \frac{1}{2}
+<p>The unknown displacement field <picture><source srcset=$u_d(\mathbf x)$ in direction $d$ is obtained from the minimization problem

    +\[ \min\, \int \frac{1}{2}
   c(\mathbf x)
   \mathbf \nabla u_d(\mathbf x) \cdot
   \mathbf \nabla u_d(\mathbf x)
   \,\rm d x
-\] +\]" src="form_1395.png"/>

    subject to prescribed constraints. The minimizer is obtained by solving the Laplace equation of the dim components of a displacement field that maps the current domain into one described by new_points . Linear finite elements with four Gaussian quadrature points in each direction are used. The difference between the vertex positions specified in new_points and their current value in tria therefore represents the prescribed values of this displacement field at the boundary of the domain, or more precisely at all of those locations for which new_points provides values (which may be at part of the boundary, or even in the interior of the domain). The function then evaluates this displacement field at each unconstrained vertex and uses it to place the mapped vertex where the displacement field locates it. Because the solution of the Laplace equation is smooth, this guarantees a smooth mapping from the old domain to the new one.

    Parameters
    @@ -3410,7 +3410,7 @@

    This function does the same as the previous one, i.e. it partitions a triangulation using a partitioning algorithm into a number of subdomains identified by the cell->subdomain_id() flag.

    The difference to the previous function is the second argument, a sparsity pattern that represents the connectivity pattern between cells.

    -

    While the function above builds it directly from the triangulation by considering which cells neighbor each other, this function can take a more refined connectivity graph. The sparsity pattern needs to be of size $N\times N$, where $N$ is the number of active cells in the triangulation. If the sparsity pattern contains an entry at position $(i,j)$, then this means that cells $i$ and $j$ (in the order in which they are traversed by active cell iterators) are to be considered connected; partitioning algorithm will then try to partition the domain in such a way that (i) the subdomains are of roughly equal size, and (ii) a minimal number of connections are broken.

    +

    While the function above builds it directly from the triangulation by considering which cells neighbor each other, this function can take a more refined connectivity graph. The sparsity pattern needs to be of size $N\times N$, where $N$ is the number of active cells in the triangulation. If the sparsity pattern contains an entry at position $(i,j)$, then this means that cells $i$ and $j$ (in the order in which they are traversed by active cell iterators) are to be considered connected; partitioning algorithm will then try to partition the domain in such a way that (i) the subdomains are of roughly equal size, and (ii) a minimal number of connections are broken.

    This function is mainly useful in cases where connections between cells exist that are not present in the triangulation alone (otherwise the previous function would be the simpler one to use). Such connections may include that certain parts of the boundary of a domain are coupled through symmetric boundary conditions or integrals (e.g. friction contact between the two sides of a crack in the domain), or if a numerical scheme is used that not only connects immediate neighbors but a larger neighborhood of cells (e.g. when solving integral equations).

    In addition, this function may be useful in cases where the default sparsity pattern is not entirely sufficient. This can happen because the default is to just consider face neighbors, not neighboring cells that are connected by edges or vertices. While the latter couple when using continuous finite elements, they are typically still closely connected in the neighborship graph, and partitioning algorithm will not usually cut important connections in this case. However, if there are vertices in the mesh where many cells (many more than the common 4 or 6 in 2d and 3d, respectively) come together, then there will be a significant number of cells that are connected across a vertex, but several degrees removed in the connectivity graph built only using face neighbors. In a case like this, partitioning algorithm may sometimes make bad decisions and you may want to build your own connectivity graph.

    Note
    If the weight signal has been attached to the triangulation, then this will be used and passed to the partitioner.
    @@ -4016,7 +4016,7 @@

    An orthogonal equality test for faces.

    face1 and face2 are considered equal, if a one to one matching between its vertices can be achieved via an orthogonal equality relation.

    -

    Here, two vertices v_1 and v_2 are considered equal, if $M\cdot v_1 + offset - v_2$ is parallel to the unit vector in unit direction direction. If the parameter matrix is a reference to a spacedim x spacedim matrix, $M$ is set to matrix, otherwise $M$ is the identity matrix.

    +

    Here, two vertices v_1 and v_2 are considered equal, if $M\cdot v_1 + offset - v_2$ is parallel to the unit vector in unit direction direction. If the parameter matrix is a reference to a spacedim x spacedim matrix, $M$ is set to matrix, otherwise $M$ is the identity matrix.

    If the matching was successful, the relative orientation of face1 with respect to face2 is returned in the bitset orientation, where

    orientation[0] -> face_orientation
    orientation[1] -> face_flip
    orientation[2] -> face_rotation
    @@ -4135,8 +4135,8 @@

    This function tries to match all faces belonging to the first boundary with faces belonging to the second boundary with the help of orthogonal_equality().

    The bitset that is returned inside of PeriodicFacePair encodes the relative orientation of the first face with respect to the second face, see the documentation of orthogonal_equality() for further details.

    The direction refers to the space direction in which periodicity is enforced. When matching periodic faces this vector component is ignored.

    -

    The offset is a vector tangential to the faces that is added to the location of vertices of the 'first' boundary when attempting to match them to the corresponding vertices of the 'second' boundary. This can be used to implement conditions such as $u(0,y)=u(1,y+1)$.

    -

    Optionally, a $dim\times dim$ rotation matrix can be specified that describes how vector valued DoFs of the first face should be modified prior to constraining to the DoFs of the second face. The matrix is used in two places. First, matrix will be supplied to orthogonal_equality() and used for matching faces: Two vertices $v_1$ and $v_2$ match if $\text{matrix}\cdot v_1 + \text{offset} - v_2$ is parallel to the unit vector in unit direction direction. (For more details see DoFTools::make_periodicity_constraints(), the glossary glossary entry on periodic conditions and step-45). Second, matrix will be stored in the PeriodicFacePair collection matched_pairs for further use.

    +

    The offset is a vector tangential to the faces that is added to the location of vertices of the 'first' boundary when attempting to match them to the corresponding vertices of the 'second' boundary. This can be used to implement conditions such as $u(0,y)=u(1,y+1)$.

    +

    Optionally, a $dim\times dim$ rotation matrix can be specified that describes how vector valued DoFs of the first face should be modified prior to constraining to the DoFs of the second face. The matrix is used in two places. First, matrix will be supplied to orthogonal_equality() and used for matching faces: Two vertices $v_1$ and $v_2$ match if $\text{matrix}\cdot v_1 + \text{offset} - v_2$ is parallel to the unit vector in unit direction direction. (For more details see DoFTools::make_periodicity_constraints(), the glossary glossary entry on periodic conditions and step-45). Second, matrix will be stored in the PeriodicFacePair collection matched_pairs for further use.

    Template Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators.html differs (JavaScript source, ASCII text, with very long lines (800)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators.html 2023-11-25 15:26:00.586542590 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators.html 2023-11-25 15:26:00.586542590 +0100 @@ -125,9 +125,9 @@

    The namespace L2 contains functions for mass matrices and L2-inner products.

    Notational conventions

    In most cases, the action of a function in this namespace can be described by a single integral. We distinguish between integrals over cells Z and over faces F. If an integral is denoted as

    -\[
+<picture><source srcset=\[
   \int_Z u \otimes v \,dx,
-\] +\]" src="form_1564.png"/>

    it will yield the following results, depending on the type of operation

    • @@ -137,7 +137,7 @@
    • If the function returns a number, then this number is the integral of the two given functions u and v.
    -

    We will use regular cursive symbols $u$ for scalars and bold symbols $\mathbf u$ for vectors. Test functions are always v and trial functions are always u. Parameters are Greek and the face normal vectors are $\mathbf n = \mathbf n_1 = -\mathbf n_2$.

    +

    We will use regular cursive symbols $u$ for scalars and bold symbols $\mathbf u$ for vectors. Test functions are always v and trial functions are always u. Parameters are Greek and the face normal vectors are $\mathbf n = \mathbf n_1 = -\mathbf n_2$.

    Signature of functions

    Functions in this namespace follow a generic signature. In the simplest case, you have two related functions

    template <int dim>
    void
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html differs (JavaScript source, ASCII text, with very long lines (1117)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 2023-11-25 15:26:00.603208915 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 2023-11-25 15:26:00.603208915 +0100 @@ -169,8 +169,8 @@
    MeshTypeA type that satisfies the requirements of the MeshType concept.

    Advection along the direction w in weak form with derivative on the test function

    -\[ m_{ij} = \int_Z u_j\,(\mathbf w \cdot \nabla) v_i
-\, dx. \] +\[ m_{ij} = \int_Z u_j\,(\mathbf w \cdot \nabla) v_i
+\, dx. \]

    The FiniteElement in fe may be scalar or vector valued. In the latter case, the advection operator is applied to each component separately.

    Parameters
    @@ -239,7 +239,7 @@

    Scalar advection residual operator in strong form

    -\[ r_i = \int_Z  (\mathbf w \cdot \nabla)u\, v_i \, dx. \] +\[ r_i = \int_Z  (\mathbf w \cdot \nabla)u\, v_i \, dx. \]

    Warning
    This is not the residual consistent with cell_matrix(), but with its transpose.
    @@ -298,8 +298,8 @@

    Vector-valued advection residual operator in strong form

    -\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf u\bigr)
-\cdot\mathbf v_i \, dx. \] +\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf u\bigr)
+\cdot\mathbf v_i \, dx. \]

    Warning
    This is not the residual consistent with cell_matrix(), but with its transpose.
    @@ -359,7 +359,7 @@

    Scalar advection residual operator in weak form

    -\[ r_i = \int_Z  (\mathbf w \cdot \nabla)v\, u_i \, dx. \] +\[ r_i = \int_Z  (\mathbf w \cdot \nabla)v\, u_i \, dx. \]

    Definition at line 216 of file advection.h.

    @@ -417,8 +417,8 @@

    Vector-valued advection residual operator in weak form

    -\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf v\bigr)
-\cdot\mathbf u_i \, dx. \] +\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf v\bigr)
+\cdot\mathbf u_i \, dx. \]

    Definition at line 256 of file advection.h.

    @@ -467,11 +467,11 @@

    Upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and zero else:

    -\[
+<picture><source srcset=\[
 a_{ij} = \int_{\partial\Omega}
 [\mathbf w\cdot\mathbf n]_+
 u_i v_j \, ds
-\] +\]" src="form_1518.png"/>

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected by the same velocity.

    @@ -537,13 +537,13 @@

    Scalar case: Residual for upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and the value of the incoming boundary condition on the inflow boundary:

    -\[
+<picture><source srcset=\[
 a_{ij} = \int_{\partial\Omega}
 (\mathbf w\cdot\mathbf n)
 \widehat u v_j \, ds
-\] +\]" src="form_1519.png"/>

    -

    Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

    +

    Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected by the same velocity.

    @@ -606,13 +606,13 @@

    Vector-valued case: Residual for upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and the value of the incoming boundary condition on the inflow boundary:

    -\[
+<picture><source srcset=\[
 a_{ij} = \int_{\partial\Omega}
 (\mathbf w\cdot\mathbf n)
 \widehat u v_j \, ds
-\] +\]" src="form_1519.png"/>

    -

    Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

    +

    Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected by the same velocity.

    @@ -687,13 +687,13 @@

    Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

    -\[
+<picture><source srcset=\[
 a_{ij} = \int_F \left|\mathbf w
 \cdot \mathbf n\right|
 u^\uparrow
 (v^\uparrow-v^\downarrow)
 \,ds
-\] +\]" src="form_1521.png"/>

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected the same way.

    @@ -761,13 +761,13 @@

    Scalar case: Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

    -\[
+<picture><source srcset=\[
 a_{ij} = \int_F \left|\mathbf w
 \cdot \mathbf n\right|
 u^\uparrow
 (v^\uparrow-v^\downarrow)
 \,ds
-\] +\]" src="form_1521.png"/>

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected the same way.

    @@ -833,13 +833,13 @@

    Vector-valued case: Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

    -\[
+<picture><source srcset=\[
 a_{ij} = \int_F \left|\mathbf w
 \cdot \mathbf n\right|
 u^\uparrow
 (v^\uparrow-v^\downarrow)
 \,ds
-\] +\]" src="form_1521.png"/>

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected the same way.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html differs (JavaScript source, ASCII text, with very long lines (988)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 2023-11-25 15:26:00.616541980 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 2023-11-25 15:26:00.616541980 +0100 @@ -164,7 +164,7 @@

    Cell matrix for divergence. The derivative is on the trial function.

    -\[ \int_Z v\nabla \cdot \mathbf u \,dx \] +\[ \int_Z v\nabla \cdot \mathbf u \,dx \]

    This is the strong divergence operator and the trial space should be at least Hdiv. The test functions may be discontinuous.

    @@ -209,8 +209,8 @@

    The residual of the divergence operator in strong form.

    -\[ \int_Z
-v\nabla \cdot \mathbf u \,dx \] +\[ \int_Z
+v\nabla \cdot \mathbf u \,dx \]

    This is the strong divergence operator and the trial space should be at least Hdiv. The test functions may be discontinuous.

    The function cell_matrix() is the Frechet derivative of this function with respect to the test functions.

    @@ -256,8 +256,8 @@

    The residual of the divergence operator in weak form.

    -\[ - \int_Z
-\nabla v \cdot \mathbf u \,dx \] +\[ - \int_Z
+\nabla v \cdot \mathbf u \,dx \]

    This is the weak divergence operator and the test space should be at least H1. The trial functions may be discontinuous.

    Todo:
    Verify: The function cell_matrix() is the Frechet derivative of this function with respect to the test functions.
    @@ -303,8 +303,8 @@

    Cell matrix for gradient. The derivative is on the trial function.

    -\[
-\int_Z \nabla u \cdot \mathbf v\,dx \] +\[
+\int_Z \nabla u \cdot \mathbf v\,dx \]

    This is the strong gradient and the trial space should be at least in H1. The test functions can be discontinuous.

    @@ -349,8 +349,8 @@

    The residual of the gradient operator in strong form.

    -\[ \int_Z
-\mathbf v\cdot\nabla u \,dx \] +\[ \int_Z
+\mathbf v\cdot\nabla u \,dx \]

    This is the strong gradient operator and the trial space should be at least H1. The test functions may be discontinuous.

    The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.

    @@ -397,8 +397,8 @@

    The residual of the gradient operator in weak form.

    -\[ -\int_Z
-\nabla\cdot \mathbf v u \,dx \] +\[ -\int_Z
+\nabla\cdot \mathbf v u \,dx \]

    This is the weak gradient operator and the test space should be at least Hdiv. The trial functions may be discontinuous.

    Todo:
    Verify: The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.
    @@ -444,7 +444,7 @@

    The trace of the divergence operator, namely the product of the normal component of the vector valued trial space and the test space.

    -\[ \int_F (\mathbf u\cdot \mathbf n) v \,ds \] +\[ \int_F (\mathbf u\cdot \mathbf n) v \,ds \]

    Definition at line 259 of file divergence.h.

    @@ -493,9 +493,9 @@

    The trace of the divergence operator, namely the product of the normal component of the vector valued trial space and the test space.

    -\[
+<picture><source srcset=\[
 \int_F (\mathbf u\cdot \mathbf n) v \,ds
-\] +\]" src="form_1529.png"/>

    Definition at line 292 of file divergence.h.

    @@ -540,9 +540,9 @@

    The trace of the gradient operator, namely the product of the normal component of the vector valued test space and the trial space.

    -\[
+<picture><source srcset=\[
 \int_F u (\mathbf v\cdot \mathbf n) \,ds
-\] +\]" src="form_1530.png"/>

    Definition at line 324 of file divergence.h.

    @@ -611,10 +611,10 @@

    The trace of the divergence operator, namely the product of the jump of the normal component of the vector valued trial function and the mean value of the test function.

    -\[
+<picture><source srcset=\[
 \int_F (\mathbf u_1\cdot \mathbf n_1 + \mathbf u_2 \cdot \mathbf n_2)
 \frac{v_1+v_2}{2} \,ds
-\] +\]" src="form_1531.png"/>

    Definition at line 358 of file divergence.h.

    @@ -673,12 +673,12 @@

    The jump of the normal component

    -\[
+<picture><source srcset=\[
 \int_F
  (\mathbf u_1\cdot \mathbf n_1 + \mathbf u_2 \cdot \mathbf n_2)
  (\mathbf v_1\cdot \mathbf n_1 + \mathbf v_2 \cdot \mathbf n_2)
 \,ds
-\] +\]" src="form_1532.png"/>

    Definition at line 417 of file divergence.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html differs (JavaScript source, ASCII text, with very long lines (1248)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 2023-11-25 15:26:00.629875041 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 2023-11-25 15:26:00.629875041 +0100 @@ -162,7 +162,7 @@

    The linear elasticity operator in weak form, namely double contraction of symmetric gradients.

    -\[ \int_Z \varepsilon(u): \varepsilon(v)\,dx \] +\[ \int_Z \varepsilon(u): \varepsilon(v)\,dx \]

    Definition at line 51 of file elasticity.h.

    @@ -215,7 +215,7 @@

    Vector-valued residual operator for linear elasticity in weak form

    -\[ - \int_Z \varepsilon(u): \varepsilon(v) \,dx \] +\[ - \int_Z \varepsilon(u): \varepsilon(v) \,dx \]

    Definition at line 84 of file elasticity.h.

    @@ -268,10 +268,10 @@

    The matrix for the weak boundary condition of Nitsche type for linear elasticity:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u \cdot v - n^T \epsilon(u) v - u \epsilon(v)
 n\Bigr)\;ds.
-\] +\]" src="form_1535.png"/>

    Definition at line 123 of file elasticity.h.

    @@ -324,10 +324,10 @@

    The matrix for the weak boundary condition of Nitsche type for the tangential displacement in linear elasticity:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u_\tau \cdot v_\tau - n^T \epsilon(u_\tau) v_\tau -
 u_\tau^T \epsilon(v_\tau) n\Bigr)\;ds.
-\] +\]" src="form_1536.png"/>

    Definition at line 178 of file elasticity.h.

    @@ -387,12 +387,12 @@

    Weak boundary condition for the elasticity operator by Nitsche, namely on the face F the vector

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (u-g) \cdot v - n^T \epsilon(u) v - (u-g) \epsilon(v)
 n^T\Bigr)\;ds.
-\] +\]" src="form_1537.png"/>

    -

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    +

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    Definition at line 257 of file elasticity.h.

    @@ -459,10 +459,10 @@

    The weak boundary condition of Nitsche type for the tangential displacement in linear elasticity:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (u_\tau-g_\tau) \cdot v_\tau - n^T \epsilon(u_\tau) v
 - (u_\tau-g_\tau) \epsilon(v_\tau) n\Bigr)\;ds.
-\] +\]" src="form_1539.png"/>

    Definition at line 309 of file elasticity.h.

    @@ -517,12 +517,12 @@

    Homogeneous weak boundary condition for the elasticity operator by Nitsche, namely on the face F the vector

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u \cdot v - n^T \epsilon(u) v - u \epsilon(v)
 n^T\Bigr)\;ds.
-\] +\]" src="form_1540.png"/>

    -

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    +

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    Definition at line 387 of file elasticity.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html differs (JavaScript source, ASCII text, with very long lines (1245)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 2023-11-25 15:26:00.643208101 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 2023-11-25 15:26:00.643208101 +0100 @@ -144,9 +144,9 @@

    The weak form of the grad-div operator penalizing volume changes

    -\[
+<picture><source srcset=\[
  \int_Z \nabla\cdot u \nabla \cdot v \,dx
-\] +\]" src="form_1541.png"/>

    Definition at line 52 of file grad_div.h.

    @@ -190,9 +190,9 @@

    The weak form of the grad-div residual

    -\[
+<picture><source srcset=\[
  \int_Z \nabla\cdot u \nabla \cdot v \,dx
-\] +\]" src="form_1541.png"/>

    Definition at line 86 of file grad_div.h.

    @@ -245,10 +245,10 @@

    The matrix for the weak boundary condition of Nitsche type for linear elasticity:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (u \cdot n)(v \cdot n)  - \nabla\cdot u
 v\cdot n - u \cdot n \nabla \cdot v \Bigr)\;ds.
-\] +\]" src="form_1542.png"/>

    Definition at line 122 of file grad_div.h.

    @@ -308,14 +308,14 @@

    Weak boundary condition for the Laplace operator by Nitsche, vector valued version, namely on the face F the vector

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (\mathbf u \cdot \mathbf n- \mathbf g \cdot
 \mathbf n) (\mathbf v \cdot \mathbf n)
 - \nabla \cdot \mathbf u (\mathbf v \cdot \mathbf n)
 - (\mathbf u-\mathbf g) \cdot \mathbf n \nabla \cdot v\Bigr)\;ds.
-\] +\]" src="form_1543.png"/>

    -

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    +

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    Definition at line 174 of file grad_div.h.

    @@ -464,12 +464,12 @@

    Grad-div residual term for the symmetric interior penalty method:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [\mathbf u \cdot\mathbf n]
 \cdot[\mathbf v \cdot \mathbf n]
 - \{\nabla \cdot \mathbf u\}[\mathbf v\cdot \mathbf n]
 - [\mathbf u\times \mathbf n]\{\nabla\cdot \mathbf v\} \Bigr) \; ds.
-\] +\]" src="form_1544.png"/>

    See for instance Hansbo and Larson, 2002

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html differs (JavaScript source, ASCII text, with very long lines (821)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 2023-11-25 15:26:00.656541166 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 2023-11-25 15:26:00.656541166 +0100 @@ -197,18 +197,18 @@

    The weighted mass matrix for scalar or vector values finite elements.

    -\[ \int_Z \omega(x) uv\,dx \quad \text{or} \quad \int_Z \omega(x)
-\mathbf u\cdot \mathbf v\,dx \] +\[ \int_Z \omega(x) uv\,dx \quad \text{or} \quad \int_Z \omega(x)
+\mathbf u\cdot \mathbf v\,dx \]

    Likewise, this term can be used on faces, where it computes the integrals

    -\[ \int_F \omega(x) uv\,ds \quad \text{or} \quad \int_F
-\omega(x) \mathbf u\cdot \mathbf v\,ds \] +\[ \int_F \omega(x) uv\,ds \quad \text{or} \quad \int_F
+\omega(x) \mathbf u\cdot \mathbf v\,ds \]

    Parameters
    - +
    MThe weighted mass matrix obtained as result.
    feThe FEValues object describing the local trial function space. update_values and update_JxW_values must be set.
    weightsThe weights, $\omega(x)$, evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
    weightsThe weights, $\omega(x)$, evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
    @@ -256,13 +256,13 @@

    L2-inner product for scalar functions.

    -\[ \int_Z fv\,dx \quad \text{or} \quad \int_F fv\,ds \] +\[ \int_Z fv\,dx \quad \text{or} \quad \int_F fv\,ds \]

    Parameters
    - +
    resultThe vector obtained as result.
    feThe FEValues object describing the local trial function space. update_values and update_JxW_values must be set.
    inputThe representation of $f$ evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
    inputThe representation of $f$ evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
    factorA constant that multiplies the result.
    @@ -309,14 +309,14 @@

    L2-inner product for a slice of a vector valued right hand side.

    -\[ \int_Z \mathbf f\cdot \mathbf v\,dx \quad \text{or}
-\quad \int_F \mathbf f\cdot \mathbf v\,ds \] +\[ \int_Z \mathbf f\cdot \mathbf v\,dx \quad \text{or}
+\quad \int_F \mathbf f\cdot \mathbf v\,ds \]

    Parameters
    - +
    resultThe vector obtained as result.
    feThe FEValues object describing the local trial function space. update_values and update_JxW_values must be set.
    inputThe vector valued representation of $\mathbf f$ evaluated at the quadrature points in the finite element (size of each component must be equal to the number of quadrature points in the element).
    inputThe vector valued representation of $\mathbf f$ evaluated at the quadrature points in the finite element (size of each component must be equal to the number of quadrature points in the element).
    factorA constant that multiplies the result.
    @@ -383,9 +383,9 @@
    -

    The jump matrix between two cells for scalar or vector values finite elements. Note that the factor $\gamma$ can be used to implement weighted jumps.

    -\[ \int_F [\gamma u][\gamma v]\,ds \quad \text{or}
-\int_F [\gamma \mathbf u]\cdot [\gamma \mathbf v]\,ds \] +

    The jump matrix between two cells for scalar or vector values finite elements. Note that the factor $\gamma$ can be used to implement weighted jumps.

    +\[ \int_F [\gamma u][\gamma v]\,ds \quad \text{or}
+\int_F [\gamma \mathbf u]\cdot [\gamma \mathbf v]\,ds \]

    Using appropriate weights, this term can be used to penalize violation of conformity in H1.

    Note that for the parameters that follow, the external matrix refers to the flux between cells, while the internal matrix refers to entries coupling inside the cell.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html differs (JavaScript source, ASCII text, with very long lines (1245)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 2023-11-25 15:26:00.669874226 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 2023-11-25 15:26:00.669874226 +0100 @@ -159,8 +159,8 @@

    Laplacian in weak form, namely on the cell Z the matrix

    -\[
-\int_Z \nu \nabla u \cdot \nabla v \, dx. \] +\[
+\int_Z \nu \nabla u \cdot \nabla v \, dx. \]

    The FiniteElement in fe may be scalar or vector valued. In the latter case, the Laplacian is applied to each component separately.

    @@ -214,7 +214,7 @@

    Laplacian residual operator in weak form

    -\[ \int_Z \nu \nabla u \cdot \nabla v \, dx. \] +\[ \int_Z \nu \nabla u \cdot \nabla v \, dx. \]

    Definition at line 92 of file laplace.h.

    @@ -267,7 +267,7 @@

    Vector-valued Laplacian residual operator in weak form

    -\[ \int_Z \nu \nabla u : \nabla v \, dx. \] +\[ \int_Z \nu \nabla u : \nabla v \, dx. \]

    Definition at line 119 of file laplace.h.

    @@ -312,11 +312,11 @@

    Weak boundary condition of Nitsche type for the Laplacian, namely on the face F the matrix

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u v - \partial_n u v - u \partial_n v\Bigr)\;ds.
-\] +\]" src="form_1557.png"/>

    -

    Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

    +

    Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

    Definition at line 157 of file laplace.h.

    @@ -360,12 +360,12 @@

    Weak boundary condition of Nitsche type for the Laplacian applied to the tangential component only, namely on the face F the matrix

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u_\tau v_\tau - \partial_n u_\tau v_\tau - u_\tau
 \partial_n v_\tau\Bigr)\;ds.
-\] +\]" src="form_1558.png"/>

    -

    Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

    +

    Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

    Definition at line 198 of file laplace.h.

    @@ -426,12 +426,12 @@

    Weak boundary condition for the Laplace operator by Nitsche, scalar version, namely on the face F the vector

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (u-g) v - \partial_n u v - (u-g) \partial_n
 v\Bigr)\;ds.
-\] +\]" src="form_1559.png"/>

    -

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    +

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    Definition at line 261 of file laplace.h.

    @@ -490,13 +490,13 @@

    Weak boundary condition for the Laplace operator by Nitsche, vector valued version, namely on the face F the vector

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (\mathbf u- \mathbf g) \cdot \mathbf v
 - \partial_n \mathbf u \cdot \mathbf v
 - (\mathbf u-\mathbf g) \cdot \partial_n \mathbf v\Bigr)\;ds.
-\] +\]" src="form_1560.png"/>

    -

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    +

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    Definition at line 308 of file laplace.h.

    @@ -566,10 +566,10 @@

    Flux for the interior penalty method for the Laplacian, namely on the face F the matrices associated with the bilinear form

    -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [u][v] - \{\nabla u\}[v\mathbf n] - [u\mathbf
 n]\{\nabla v\} \Bigr) \; ds.
-\] +\]" src="form_1561.png"/>

    The penalty parameter should always be the mean value of the penalties needed for stability on each side. In the case of constant coefficients, it can be computed using compute_penalty().

    If factor2 is missing or negative, the factor is assumed the same on both sides. If factors differ, note that the penalty parameter has to be computed accordingly.

    @@ -642,10 +642,10 @@

    Flux for the interior penalty method for the Laplacian applied to the tangential components of a vector field, namely on the face F the matrices associated with the bilinear form

    -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [u_\tau][v_\tau] - \{\nabla u_\tau\}[v_\tau\mathbf
 n] - [u_\tau\mathbf n]\{\nabla v_\tau\} \Bigr) \; ds.
-\] +\]" src="form_1562.png"/>

    Warning
    This function is still under development!
    @@ -729,10 +729,10 @@

    Residual term for the symmetric interior penalty method:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [u][v] - \{\nabla u\}[v\mathbf n] - [u\mathbf
 n]\{\nabla v\} \Bigr) \; ds.
-\] +\]" src="form_1561.png"/>

    Definition at line 544 of file laplace.h.

    @@ -813,11 +813,11 @@

    Vector-valued residual term for the symmetric interior penalty method:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [\mathbf u]\cdot[\mathbf v]
 - \{\nabla \mathbf u\}[\mathbf v\otimes \mathbf n]
 - [\mathbf u\otimes \mathbf n]\{\nabla \mathbf v\} \Bigr) \; ds.
-\] +\]" src="form_1563.png"/>

    Definition at line 611 of file laplace.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html differs (JavaScript source, ASCII text, with very long lines (848)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 2023-11-25 15:26:00.683207290 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 2023-11-25 15:26:00.683207290 +0100 @@ -118,22 +118,22 @@

    Local integrators related to curl operators and their traces.

    We use the following conventions for curl operators. First, in three space dimensions

    -\[
+<picture><source srcset=\[
 \nabla\times \mathbf u = \begin{pmatrix}
   \partial_2 u_3 - \partial_3 u_2 \\
   \partial_3 u_1 - \partial_1 u_3 \\
   \partial_1 u_2 - \partial_2 u_1
 \end{pmatrix}.
-\] +\]" src="form_1566.png"/>

    -

    In two space dimensions, the curl is obtained by extending a vector u to $(u_1, u_2, 0)^T$ and a scalar p to $(0,0,p)^T$. Computing the nonzero components, we obtain the scalar curl of a vector function and the vector curl of a scalar function. The current implementation exchanges the sign and we have:

    -\[
+<p>In two space dimensions, the curl is obtained by extending a vector <b>u</b> to <picture><source srcset=$(u_1, u_2, 0)^T$ and a scalar p to $(0,0,p)^T$. Computing the nonzero components, we obtain the scalar curl of a vector function and the vector curl of a scalar function. The current implementation exchanges the sign and we have:

    +\[
  \nabla \times \mathbf u = \partial_1 u_2 - \partial_2 u_1,
  \qquad
  \nabla \times p = \begin{pmatrix}
    \partial_2 p \\ -\partial_1 p
  \end{pmatrix}
-\] +\]" src="form_1569.png"/>

    Function Documentation

    @@ -167,7 +167,7 @@

    Auxiliary function. Given the tensors of dim second derivatives, compute the curl of the curl of a vector function. The result in two and three dimensions is:

    -\[
+<picture><source srcset=\[
 \nabla\times\nabla\times \mathbf u = \begin{pmatrix}
 \partial_1\partial_2 u_2 - \partial_2^2 u_1 \\
 \partial_1\partial_2 u_1 - \partial_1^2 u_2
@@ -181,7 +181,7 @@
 \partial_3\partial_1 u_1 + \partial_3\partial_2 u_2
 - (\partial_1^2+\partial_2^2) u_3
 \end{pmatrix}
-\] +\]" src="form_1570.png"/>

    Note
    The third tensor argument is not used in two dimensions and can for instance duplicate one of the previous.
    @@ -225,9 +225,9 @@

    Auxiliary function. Given dim tensors of first derivatives and a normal vector, compute the tangential curl

    -\[
+<picture><source srcset=\[
 \mathbf n \times \nabla \times u.
-\] +\]" src="form_1571.png"/>

    Note
    The third tensor argument is not used in two dimensions and can for instance duplicate one of the previous.
    @@ -267,10 +267,10 @@

    The curl-curl operator

    -\[
+<picture><source srcset=\[
 \int_Z \nabla\times u \cdot
 \nabla \times v \,dx
-\] +\]" src="form_1572.png"/>

    in weak form.

    @@ -315,9 +315,9 @@

    The matrix for the curl operator

    -\[
+<picture><source srcset=\[
 \int_Z \nabla \times u \cdot v \,dx.
-\] +\]" src="form_1573.png"/>

    This is the standard curl operator in 3d and the scalar curl in 2d. The vector curl operator can be obtained by exchanging test and trial functions.

    @@ -369,14 +369,14 @@

    The matrix for weak boundary condition of Nitsche type for the tangential component in Maxwell systems.

    -\[
+<picture><source srcset=\[
 \int_F \biggl( 2\gamma
 (u\times n) (v\times n) -
 (u\times n)(\nu \nabla\times
 v) - (v\times
 n)(\nu \nabla\times u)
 \biggr)
-\] +\]" src="form_1574.png"/>

    Definition at line 265 of file maxwell.h.

    @@ -415,10 +415,10 @@

    The product of two tangential traces,

    -\[
+<picture><source srcset=\[
 \int_F (u\times n)(v\times n)
 \, ds.
-\] +\]" src="form_1575.png"/>

    Definition at line 328 of file maxwell.h.

    @@ -498,14 +498,14 @@

    The interior penalty fluxes for Maxwell systems.

    -\[
+<picture><source srcset=\[
 \int_F \biggl( \gamma
 \{u\times n\}\{v\times n\} -
 \{u\times n\}\{\nu \nabla\times
 v\}- \{v\times
 n\}\{\nu \nabla\times u\}
 \biggr)\;dx
-\] +\]" src="form_1576.png"/>

    Definition at line 385 of file maxwell.h.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceMatrixCreator.html differs (JavaScript source, ASCII text, with very long lines (1746)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceMatrixCreator.html 2023-11-25 15:26:00.703206883 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceMatrixCreator.html 2023-11-25 15:26:00.703206883 +0100 @@ -139,7 +139,7 @@

    Detailed Description

    This namespace provides functions that assemble certain standard matrices for a given triangulation, using a given finite element, a given mapping and a quadrature formula.

    Conventions for all functions

    -

    There exist two versions of almost all functions, one that takes an explicit Mapping argument and one that does not. The second one generally calls the first with an implicit $Q_1$ argument (i.e., with an argument of kind MappingQ(1)). If your intend your code to use a different mapping than a (bi-/tri-)linear one, then you need to call the functions with mapping argument should be used.

    +

    There exist two versions of almost all functions, one that takes an explicit Mapping argument and one that does not. The second one generally calls the first with an implicit $Q_1$ argument (i.e., with an argument of kind MappingQ(1)). If your intend your code to use a different mapping than a (bi-/tri-)linear one, then you need to call the functions with mapping argument should be used.

    All functions take a sparse matrix object to hold the matrix to be created. The functions assume that the matrix is initialized with a sparsity pattern (SparsityPattern) corresponding to the given degree of freedom handler, i.e. the sparsity structure is already as needed. You can do this by calling the DoFTools::make_sparsity_pattern() function.

    Furthermore it is assumed that no relevant data is in the matrix. Some entries will be overwritten and some others will contain invalid data if the matrix wasn't empty before. Therefore you may want to clear the matrix before assemblage.

    By default, all created matrices are ‘raw’: they are not condensed, i.e. hanging nodes are not eliminated. The reason is that you may want to add several matrices and could then condense afterwards only once, instead of for every matrix. To actually do computations with these matrices, you have to condense the matrix using the AffineConstraints::condense function; you also have to condense the right hand side accordingly and distribute the solution afterwards. Alternatively, you can give an optional argument AffineConstraints that writes cell matrix (and vector) entries with distribute_local_to_global into the global matrix and vector. This way, adding several matrices from different sources is more complicated and you should make sure that you do not mix different ways of applying constraints. Particular caution is necessary when the given AffineConstraints object contains inhomogeneous constraints: In that case, the matrix assembled this way must be the only matrix (or you need to assemble the same right hand side for every matrix you generate and add together).

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceNonMatching.html differs (JavaScript source, ASCII text, with very long lines (1786)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceNonMatching.html 2023-11-25 15:26:00.719873208 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceNonMatching.html 2023-11-25 15:26:00.719873208 +0100 @@ -160,8 +160,8 @@
    -

    Type describing how a cell or a face is located relative to the zero contour of a level set function, $\psi$. The values of the type correspond to:

    -

    inside if $\psi(x) < 0$, outside if $\psi(x) > 0$, intersected if $\psi(x)$ varies in sign,

    +

    Type describing how a cell or a face is located relative to the zero contour of a level set function, $\psi$. The values of the type correspond to:

    +

    inside if $\psi(x) < 0$, outside if $\psi(x) > 0$, intersected if $\psi(x)$ varies in sign,

    over the cell/face. The value "unassigned" is used to describe that the location of a cell/face has not yet been determined.

    @@ -227,17 +227,17 @@
    Enumerator
    inside 
    const AffineConstraints< number > &&#href_anchor"paramname">immersed_constraints = AffineConstraints<number>()&#href_anchor"memdoc">

    Create a coupling sparsity pattern for non-matching, overlapping grids.

    -

    Given two non-matching triangulations, representing the domains $\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) =
-\text{span}\{w_j\}_{j=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

    -\[
+<p>Given two non-matching triangulations, representing the domains <picture><source srcset=$\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) =
+\text{span}\{w_j\}_{j=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

    +\[
 M_{ij} \dealcoloneq \int_{B} v_i(x) w_j(x) dx,
                     \quad i \in [0,n), j \in [0,m),
-\] +\]" src="form_2012.png"/>

    -

    where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

    -

    The sparsity is filled by locating the position of quadrature points (obtained by the reference quadrature quad) defined on elements of $B$ with respect to the embedding triangulation $\Omega$. For each overlapping cell, the entries corresponding to space_comps in space_dh and immersed_comps in immersed_dh are added to the sparsity pattern.

    +

    where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

    +

    The sparsity is filled by locating the position of quadrature points (obtained by the reference quadrature quad) defined on elements of $B$ with respect to the embedding triangulation $\Omega$. For each overlapping cell, the entries corresponding to space_comps in space_dh and immersed_comps in immersed_dh are added to the sparsity pattern.

    The space_comps and immersed_comps masks are assumed to be ordered in the same way: the first component of space_comps will couple with the first component of immersed_comps, the second with the second, and so on. If one of the two masks has more non-zero than the other, then the excess components will be ignored.

    -

    If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sens for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

    +

    If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sens for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    This function will also work in parallel, provided that the immersed triangulation is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if you use an immersed parallel::distributed::Triangulation<dim1,spacedim>.

    See the tutorial program step-60 for an example on how to use this function.

    @@ -356,17 +356,17 @@
    const AffineConstraints< typename Matrix::value_type > &&#href_anchor"paramname">immersed_constraints = AffineConstraints<typename&#href_anchor"memdoc">

    Create a coupling mass matrix for non-matching, overlapping grids.

    -

    Given two non-matching triangulations, representing the domains $\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) =
-\text{span}\{w_j\}_{j=0}^m$, compute the coupling matrix

    -\[
+<p>Given two non-matching triangulations, representing the domains <picture><source srcset=$\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) =
+\text{span}\{w_j\}_{j=0}^m$, compute the coupling matrix

    +\[
 M_{ij} \dealcoloneq \int_{B} v_i(x) w_j(x) dx,
                     \quad i \in [0,n), j \in [0,m),
-\] +\]" src="form_2012.png"/>

    -

    where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

    -

    The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern function. The elements of the matrix are computed by locating the position of quadrature points defined on elements of $B$ with respect to the embedding triangulation $\Omega$.

    +

    where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

    +

    The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern function. The elements of the matrix are computed by locating the position of quadrature points defined on elements of $B$ with respect to the embedding triangulation $\Omega$.

    The space_comps and immersed_comps masks are assumed to be ordered in the same way: the first component of space_comps will couple with the first component of immersed_comps, the second with the second, and so on. If one of the two masks has more non-zero entries non-zero than the other, then the excess components will be ignored.

    -

    If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sense for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

    +

    If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sense for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    This function will also work in parallel, provided that the immersed triangulation is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if you use an immersed parallel::distributed::Triangulation<dim1,spacedim>.

    See the tutorial program step-60 for an example on how to use this function.

    @@ -491,16 +491,16 @@
    const ComponentMask &&#href_anchor"paramname">comps1 = ComponentMask()&#href_anchor"memdoc">

    Create a coupling sparsity pattern for non-matching independent grids, using a convolution kernel with compact support of radius epsilon.

    -

    Given two non-matching triangulations, representing the domains $\Omega^0$ and $\Omega^1$, both embedded in $\mathbb{R}^d$, and two finite element spaces $V^0(\Omega^0) = \text{span}\{v_i\}_{i=0}^n$ and $V^1(\Omega^1) =
-\text{span}\{w_\alpha\}_{\alpha=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

    +

    Given two non-matching triangulations, representing the domains $\Omega^0$ and $\Omega^1$, both embedded in $\mathbb{R}^d$, and two finite element spaces $V^0(\Omega^0) = \text{span}\{v_i\}_{i=0}^n$ and $V^1(\Omega^1) =
+\text{span}\{w_\alpha\}_{\alpha=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

    -\[
+<picture><source srcset=\[
 M_{i\alpha} \dealcoloneq \int_{\Omega^0} \int_{\Omega^1}
 v_i(x) K^{\epsilon}(x-y) w_\alpha(y) dx \ dy,
 \quad i \in [0,n), \alpha \in [0,m),
-\] +\]" src="form_2020.png"/>

    -

    where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

    +

    where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

    The comps0 and comps1 masks are assumed to be ordered in the same way: the first component of comps0 will couple with the first component of comps1, the second with the second, and so on. If one of the two masks has more active components than the other, then the excess components will be ignored.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    This function will also work in parallel, provided that at least one of the triangulations is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if both triagnulations are of type parallel::distributed::Triangulation<dim1,spacedim>.

    @@ -577,15 +577,15 @@
    const ComponentMask &&#href_anchor"paramname">comps1 = ComponentMask()&#href_anchor"memdoc">

    Create a coupling mass matrix for non-matching independent grids, using a convolution kernel with compact support.

    -

    Given two non-matching triangulations, representing the domains $\Omega^0$ and $\Omega^1$, both embedded in $\mathbb{R}^d$, and two finite element spaces $V^0(\Omega^0) = \text{span}\{v_i\}_{i=0}^n$ and $V^1(\Omega^1) = \text{span}\{w_\alpha\}_{\alpha=0}^m$, compute the matrix

    +

    Given two non-matching triangulations, representing the domains $\Omega^0$ and $\Omega^1$, both embedded in $\mathbb{R}^d$, and two finite element spaces $V^0(\Omega^0) = \text{span}\{v_i\}_{i=0}^n$ and $V^1(\Omega^1) = \text{span}\{w_\alpha\}_{\alpha=0}^m$, compute the matrix

    -\[
+<picture><source srcset=\[
 M_{i\alpha} \dealcoloneq \int_{\Omega^0} \int_{\Omega^1}
 v_i(x) K^{\epsilon}(x-y) w_\alpha(y) dx \ dy,
 \quad i \in [0,n), \alpha \in [0,m),
-\] +\]" src="form_2020.png"/>

    -

    where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

    +

    where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

    The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern() function.

    The comps0 and comps1 masks are assumed to be ordered in the same way: the first component of comps0 will couple with the first component of comps1, the second with the second, and so on. If one of the two masks has more active components than the other, then the excess components will be ignored.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html differs (JavaScript source, ASCII text, with very long lines (1443)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 2023-11-25 15:26:00.736539537 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 2023-11-25 15:26:00.736539537 +0100 @@ -296,7 +296,7 @@
    -

    Returns the max/min bounds on the value, taken over all the entries in the incoming vector of FunctionBounds. That is, given the incoming function bounds, $[L_j, U_j]$, this function returns $[L, U]$, where $L = \min_{j} L_j$ and $U = \max_{j} U_j$.

    +

    Returns the max/min bounds on the value, taken over all the entries in the incoming vector of FunctionBounds. That is, given the incoming function bounds, $[L_j, U_j]$, this function returns $[L, U]$, where $L = \min_{j} L_j$ and $U = \max_{j} U_j$.

    Definition at line 201 of file quadrature_generator.cc.

    @@ -318,21 +318,21 @@
    -

    Finds the best choice of height function direction, given the FunctionBounds for a number of functions $\{\psi_j\}_{j=0}^{n-1}$. Here, "best" is meant in the sense of the implicit function theorem.

    -

    Let $J_I$ be the index set of the indefinite functions:

    -

    $J_I = \{0,..., n - 1\} \setminus \{ j : |\psi_j| > 0 \}$.

    -

    This function converts the incoming bounds to a lower bound, $L_{ij}$, on the absolute value of each component of the gradient:

    -

    $|\partial_k \psi_j| > L_{jk}$.

    -

    and then returns a coordinate direction, $i$, and a lower bound $L$, such that

    +

    Finds the best choice of height function direction, given the FunctionBounds for a number of functions $\{\psi_j\}_{j=0}^{n-1}$. Here, "best" is meant in the sense of the implicit function theorem.

    +

    Let $J_I$ be the index set of the indefinite functions:

    +

    $J_I = \{0,..., n - 1\} \setminus \{ j : |\psi_j| > 0 \}$.

    +

    This function converts the incoming bounds to a lower bound, $L_{ij}$, on the absolute value of each component of the gradient:

    +

    $|\partial_k \psi_j| > L_{jk}$.

    +

    and then returns a coordinate direction, $i$, and a lower bound $L$, such that

    -\[
+<picture><source srcset=\[
 i = \arg \max_{k} \min_{j \in J_I} L_{jk}, \\
 L =      \max_{k} \min_{j \in J_I} L_{jk}.
-\] +\]" src="form_2134.png"/>

    -

    This means $i$ is a coordinate direction such that all functions intersected by the zero contour (i.e. those belonging to $J_I$) fulfill

    -

    $|\partial_i \psi_j| > L$.

    -

    Note that the estimated lower bound, $L$, can be zero or negative. This means that no suitable height function direction exists. If all of the incoming functions are positive or negative definite the returned std::optional is non-set.

    +

    This means $i$ is a coordinate direction such that all functions intersected by the zero contour (i.e. those belonging to $J_I$) fulfill

    +

    $|\partial_i \psi_j| > L$.

    +

    Note that the estimated lower bound, $L$, can be zero or negative. This means that no suitable height function direction exists. If all of the incoming functions are positive or negative definite the returned std::optional is non-set.

    Definition at line 275 of file quadrature_generator.cc.

    @@ -422,7 +422,7 @@
    -

    Given the incoming lower and upper bounds on the value of a function $[L, U]$, return the minimum/maximum of $[L, U]$ and the function values at the vertices. That is, this function returns

    +

    Given the incoming lower and upper bounds on the value of a function $[L, U]$, return the minimum/maximum of $[L, U]$ and the function values at the vertices. That is, this function returns

    $[\min(L, L_f), \max(U, U_f)]$,

    where $L_f = \min_{v} f(x_v)$, $U_f = \max_{v} f(x_v)|$, and $x_v$ is a vertex.

    It is assumed that the incoming function is scalar valued.

    @@ -519,7 +519,7 @@
    -

    Return a lower bound, $L_a$, on the absolute value of a function, $f(x)$:

    +

    Return a lower bound, $L_a$, on the absolute value of a function, $f(x)$:

    $L_a \leq |f(x)|$,

    by estimating it from the incoming lower and upper bounds: $L \leq f(x) \leq U$.

    By rewriting the lower and upper bounds as $F - C \leq f(x) \leq F + C$, where $L = F - C$, $U = F + C$ (or $F = (U + L)/2$, $C = (U - L)/2$), we get $|f(x) - F| \leq C$. Using the inverse triangle inequality gives $|F| - |f(x)| \leq |f(x) - F| \leq C$. Thus, $L_a = |F| - C$.

    @@ -742,7 +742,7 @@
    -

    Let $\{ y_0, ..., y_{n+1} \}$ be such that $[y_0, y_{n+1}]$ is the interval and $\{ y_1, ..., y_n \}$ are the roots. In each subinterval, $[y_i, y_{i+1}]$, distribute point according to the 1D-quadrature rule $\{(x_q, w_q)\}_q$ (quadrature1D). Take the tensor product with the quadrature point $(x, w)$ (point, weight) to create dim-dimensional quadrature points

    +

    Let $\{ y_0, ..., y_{n+1} \}$ be such that $[y_0, y_{n+1}]$ is the interval and $\{ y_1, ..., y_n \}$ are the roots. In each subinterval, $[y_i, y_{i+1}]$, distribute point according to the 1D-quadrature rule $\{(x_q, w_q)\}_q$ (quadrature1D). Take the tensor product with the quadrature point $(x, w)$ (point, weight) to create dim-dimensional quadrature points

    \[
 X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q),
 W_q = w_I (y_{i+1} - y_i) w_q,
@@ -843,7 +843,7 @@
       </table>
 </div><div class=

    Return the coordinate direction that the box should be split in, assuming that the box should be split it half.

    -

    If the box is larger in one coordante direction, this direction is returned. If the box have the same extent in all directions, we choose the coordinate direction which is closest to being a height-function direction. That is, the direction $i$ that has a least negative estimate of $|\partial_i \psi_j|$. As a last resort, we choose the direction 0, if height_direction_data non-set.

    +

    If the box is larger in one coordante direction, this direction is returned. If the box have the same extent in all directions, we choose the coordinate direction which is closest to being a height-function direction. That is, the direction $i$ that has a least negative estimate of $|\partial_i \psi_j|$. As a last resort, we choose the direction 0, if height_direction_data non-set.

    Definition at line 995 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceOpenCASCADE.html differs (JavaScript source, ASCII text, with very long lines (1087)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceOpenCASCADE.html 2023-11-25 15:26:00.753205865 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceOpenCASCADE.html 2023-11-25 15:26:00.753205865 +0100 @@ -643,7 +643,7 @@ const Mapping< 2, spacedim > &&#href_anchor"paramname">mapping = StaticMappingQ1<2,&#href_anchor"memdoc">

    Given a Triangulation and an optional Mapping, create a vector of smooth curves that interpolate the connected parts of the boundary vertices of the Triangulation and return them as a vector of TopoDS_Edge objects.

    -

    This function constructs closed Bspline curve objects passing through all vertices of the boundary of the triangulation, with $C^2$ Continuity on each vertex except the first, where only $C^1$ continuity is guaranteed.

    +

    This function constructs closed Bspline curve objects passing through all vertices of the boundary of the triangulation, with $C^2$ Continuity on each vertex except the first, where only $C^1$ continuity is guaranteed.

    The returned curves are ordered with respect to the indices of the faces that make up the triangulation boundary, i.e., the first curve is the one extracted starting from the face with the lowest index, and so on.

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceParticles_1_1Utilities.html differs (JavaScript source, ASCII text, with very long lines (971)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceParticles_1_1Utilities.html 2023-11-25 15:26:00.766538927 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceParticles_1_1Utilities.html 2023-11-25 15:26:00.766538927 +0100 @@ -139,12 +139,12 @@
    const ComponentMask &&#href_anchor"paramname">space_comps = ComponentMask()&#href_anchor"memdoc">

    Create an interpolation sparsity pattern for particles.

    -

    Given a triangulation representing the domain $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the sparsity pattern that would be necessary to assemble the matrix

    +

    Given a triangulation representing the domain $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the sparsity pattern that would be necessary to assemble the matrix

    \[
 M_{i,j} \dealcoloneq v_j(x_i) ,
 \]

    -

    where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

    +

    where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

    In the case of vector valued finite element spaces, the components on which interpolation must be performed can be selected using a component mask. Only primitive finite element spaces are supported.

    When selecting more than one component, the resulting sparsity will have dimension equal to particle_handler.n_global_particles() * mask.n_selected_components() times space_dh.n_dofs(), and the corresponding matrix entries are given by

    \[
@@ -152,8 +152,8 @@
 \]

    where comp_j is the only non zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

    -

    The sparsity is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

    -

    If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the sparsity will be empty.

    +

    The sparsity is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

    +

    If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the sparsity will be empty.

    Constraints of the form supported by the AffineConstraints class may be supplied with the constraints argument. The method AffineConstraints::add_entries_local_to_global() is used to fill the final sparsity pattern.

    Definition at line 32 of file utilities.cc.

    @@ -191,12 +191,12 @@
    const ComponentMask &&#href_anchor"paramname">space_comps = ComponentMask()&#href_anchor"memdoc">

    Create an interpolation matrix for particles.

    -

    Given a triangulation representing the domains $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the matrix

    +

    Given a triangulation representing the domains $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the matrix

    \[
 M_{ij} \dealcoloneq v_j(x_i) ,
 \]

    -

    where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

    +

    where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

    In the case of vector valued finite element spaces, the components on which interpolation must be performed can be selected using a component mask. Only primitive finite element spaces are supported.

    When selecting more than one component, the resulting sparsity will have dimension equal to particle_handler.n_global_particles() * mask.n_selected_components() times space_dh.n_dofs(), and the corresponding matrix entries are given by

    \[
@@ -204,8 +204,8 @@
 \]

    where comp_j is the only non zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

    -

    The matrix is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

    -

    If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the matrix will be zero.

    +

    The matrix is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

    +

    If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the matrix will be zero.

    Constraints of the form supported by the AffineConstraints class may be supplied with the constraints argument. The method AffineConstraints::distribute_local_to_global() is used to distribute the entries of the matrix to respect the given constraints.

    Definition at line 114 of file utilities.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Elasticity_1_1Kinematics.html differs (JavaScript source, ASCII text, with very long lines (808)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Elasticity_1_1Kinematics.html 2023-11-25 15:26:00.776538722 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Elasticity_1_1Kinematics.html 2023-11-25 15:26:00.779871989 +0100 @@ -288,10 +288,10 @@

    Return the symmetric Green-Lagrange strain tensor, as constructed from the deformation gradient tensor F. The result is expressed as

    -\[
+<picture><source srcset=\[
  \mathbf{E} \dealcoloneq \frac{1}{2}
  \left[ \mathbf{F}^{T}\cdot\mathbf{F} - \mathbf{I} \right] \, .
-\] +\]" src="form_2435.png"/>

    Note
    For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.15) on p. 23 (or thereabouts).
    @@ -316,12 +316,12 @@

    Return the symmetric small strain tensor, as constructed from the displacement gradient tensor Grad_u. The result is expressed as

    -\[
+<picture><source srcset=\[
  \boldsymbol{\varepsilon} \dealcoloneq \frac{1}{2}
  \left[ \nabla_{0}\mathbf{u} + [\nabla_{0}\mathbf{u}]^{T} \right] \, .
-\] +\]" src="form_2436.png"/>

    -

    where $\mathbf{u} = \mathbf{u}(\mathbf{X})$ is the displacement at position $\mathbf{X}$ in the referential configuration. The differential operator $\nabla_{0}$ is defined as $\frac{\partial}{\partial \mathbf{X}}$.

    +

    where $\mathbf{u} = \mathbf{u}(\mathbf{X})$ is the displacement at position $\mathbf{X}$ in the referential configuration. The differential operator $\nabla_{0}$ is defined as $\frac{\partial}{\partial \mathbf{X}}$.

    Note
    For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.17) on p. 24 (or thereabouts).
    @@ -343,10 +343,10 @@

    Return the symmetric Almansi strain tensor, as constructed from the deformation gradient tensor F. The result is expressed as

    -\[
+<picture><source srcset=\[
  \mathbf{e} \dealcoloneq \frac{1}{2} \left[ \mathbf{I}
   - \mathbf{F}^{-T}\cdot\mathbf{F}^{-1} \right] \, .
-\] +\]" src="form_2438.png"/>

    Note
    For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.35) on p. 30 (or thereabouts).
    @@ -380,9 +380,9 @@

    Return the spatial velocity gradient tensor, as constructed from the deformation gradient tensor F and its material time derivative dF_dt (the material velocity gradient). The result is expressed as

    -\[
+<picture><source srcset=\[
  \mathbf{l} \dealcoloneq \dot{\mathbf{F}}\cdot\mathbf{F}^{-1} \, .
-\] +\]" src="form_2439.png"/>

    Note
    For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.47) on p. 32 (or thereabouts).
    @@ -416,15 +416,15 @@

    Return the rate of deformation tensor (also known as the rate of strain tensor), as constructed from the deformation gradient tensor F and its material time derivative dF_dt (the material velocity gradient). The result is expressed as

    -\[
+<picture><source srcset=\[
  \mathbf{d} \dealcoloneq \frac{1}{2}
  \left[ \mathbf{l} + \mathbf{l}^{T} \right]
-\] +\]" src="form_2440.png"/>

    where

    -\[
+<picture><source srcset=\[
  \mathbf{l} = \dot{\mathbf{F}}\cdot\mathbf{F}^{-1}
-\] +\]" src="form_2441.png"/>

    is the spatial velocity gradient tensor.

    Note
    For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.49) on p. 32 (or thereabouts).
    @@ -459,15 +459,15 @@

    Return the rate of rotation tensor (also known as the vorticity tensor), as constructed from the deformation gradient tensor F and its material time derivative dF_dt (the material velocity gradient). The result is expressed as

    -\[
+<picture><source srcset=\[
  \mathbf{w} \dealcoloneq \frac{1}{2}
  \left[ \mathbf{l} - \mathbf{l}^{T} \right]
-\] +\]" src="form_2442.png"/>

    where

    -\[
+<picture><source srcset=\[
  \mathbf{l} = \dot{\mathbf{F}}\cdot\mathbf{F}^{-1}
-\] +\]" src="form_2441.png"/>

    is the spatial velocity gradient tensor.

    Note
    For a discussion of the background of this function, see G. A. Holzapfel: "Nonlinear solid mechanics. A Continuum Approach for Engineering" (2007), and in particular formula (2.149) on p. 97 (or thereabouts).
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html differs (JavaScript source, ASCII text, with very long lines (1796)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 2023-11-25 15:26:00.796538314 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 2023-11-25 15:26:00.796538314 +0100 @@ -166,8 +166,8 @@ TensorType to_tensor (const FullMatrix< Number > &vec) &#href_anchor"details" id="details">

    Detailed Description

    A namespace with functions that assist in the conversion of vectors and tensors to and from a compressed format using Kelvin notation and weighting.

    -

    Both Kelvin and Voigt notation adopt the same indexing convention. With specific reference to the spatial dimension 3 case, for a rank-2 symmetric tensor $\mathbf{S}$ we enumerate its tensor components

    -\[
+<p>Both <a class=Kelvin and Voigt notation adopt the same indexing convention. With specific reference to the spatial dimension 3 case, for a rank-2 symmetric tensor $\mathbf{S}$ we enumerate its tensor components

    +\[
 \mathbf{S} \dealcoloneq \left[ \begin{array}{ccc}
  S_{00}          & S_{01}          & S_{02} \\
  S_{10} = S_{01} & S_{11}          & S_{12} \\
@@ -179,10 +179,10 @@
  sym   & n = 1 & n = 3 \\
  sym   & sym   & n = 2
 \end{array} \right] ,
-\] +\]" src="form_2465.png"/>

    -

    where $n$ denotes the Kelvin index for the tensor component, while for a general rank-2 tensor $\mathbf{T}$

    -\[
+<p> where <picture><source srcset=$n$ denotes the Kelvin index for the tensor component, while for a general rank-2 tensor $\mathbf{T}$

    +\[
 \mathbf{T} \dealcoloneq \left[ \begin{array}{ccc}
  T_{00} & T_{01} & T_{02} \\
  T_{10} & T_{11} & T_{12} \\
@@ -194,10 +194,10 @@
  n = 6 & n = 1 & n = 3 \\
  n = 7 & n = 8 & n = 2
 \end{array}\right] ,
-\] +\]" src="form_2467.png"/>

    -

    and for a rank-1 tensor $\mathbf{v}$

    -\[
+<p> and for a rank-1 tensor <picture><source srcset=$\mathbf{v}$

    +\[
 \mathbf{v} \dealcoloneq \left[ \begin{array}{c}
  v_{0} \\ v_{1} \\ v_{2}
 \end{array}\right]
@@ -205,7 +205,7 @@
 \left[ \begin{array}{c}
  n = 0 \\ n = 1 \\ n = 2
 \end{array}\right] .
-\] +\]" src="form_2469.png"/>

    To summarize, the relationship between tensor and Kelvin indices for both the three-dimensional case and the analogously discerned two-dimensional case outlined in the following table:

    @@ -247,23 +247,23 @@
    -

    To illustrate the purpose of this notation, consider the rank-2 symmetric tensors $\mathbf{S}$ and $\mathbf{E}$ that are related to one another by $\mathbf{S} = \cal{C} : \mathbf{E}$, where the operator $\cal{C}$ is a fourth-order symmetric tensor. As opposed to the commonly used Voigt notation, Kelvin (or Mandel) notation keeps the same definition of the inner product $\mathbf{S} : \mathbf{E}$ when both $\mathbf{S}$ and $\mathbf{E}$ are symmetric. In general, the inner product of all symmetric and general tensors remain the same regardless of the notation with which it is represented.

    +

    To illustrate the purpose of this notation, consider the rank-2 symmetric tensors $\mathbf{S}$ and $\mathbf{E}$ that are related to one another by $\mathbf{S} = \cal{C} : \mathbf{E}$, where the operator $\cal{C}$ is a fourth-order symmetric tensor. As opposed to the commonly used Voigt notation, Kelvin (or Mandel) notation keeps the same definition of the inner product $\mathbf{S} : \mathbf{E}$ when both $\mathbf{S}$ and $\mathbf{E}$ are symmetric. In general, the inner product of all symmetric and general tensors remain the same regardless of the notation with which it is represented.

    To achieve these two properties, namely that

    -\[
+<picture><source srcset=\[
 \mathbf{S} = \cal{C} : \mathbf{E}
 \quad \Rightarrow   \quad
 \tilde{\mathbf{S}} = \tilde{\cal{C}} \; \tilde{\mathbf{E}}
-\] +\]" src="form_2474.png"/>

    and

    -\[
+<picture><source srcset=\[
 \mathbf{S} : \mathbf{E}
 \, \equiv \,
 \tilde{\mathbf{S}} \cdot \tilde{\mathbf{E}} ,
-\] +\]" src="form_2475.png"/>

    -

    it holds that the Kelvin-condensed equivalents of the previously defined symmetric tensors, indicated by the $\tilde{\left(\bullet\right)}$, must be defined as

    -\[
+<p> it holds that the Kelvin-condensed equivalents of the previously defined symmetric tensors, indicated by the <picture><source srcset=$\tilde{\left(\bullet\right)}$, must be defined as

    +\[
 \tilde{\mathbf{S}}
   = \left[ \begin{array}{c}
   S_{00} \\ S_{11} \\ S_{22} \\ \sqrt{2} S_{12} \\ \sqrt{2} S_{02} \\
@@ -272,10 +272,10 @@
   = \left[ \begin{array}{c}
   E_{00} \\ E_{11} \\ E_{22} \\ \sqrt{2} E_{12} \\ \sqrt{2} E_{02} \\
 \sqrt{2} E_{01} \end{array}\right] .
-\] +\]" src="form_2477.png"/>

    The corresponding and consistent condensed fourth-order symmetric tensor is

    -\[
+<picture><source srcset=\[
 \tilde{\cal{C}}
   = \left[ \begin{array}{cccccc}
   \tilde{\cal{C}}_{00} & \tilde{\cal{C}}_{01} & \tilde{\cal{C}}_{02} &
@@ -310,10 +310,10 @@
 {\cal{C}}_{0201}        \\ \sqrt{2} {\cal{C}}_{0100}  & \sqrt{2}
 {\cal{C}}_{0111} & \sqrt{2} {\cal{C}}_{0122}  & 2 {\cal{C}}_{0112} & 2
 {\cal{C}}_{0102}         & 2 {\cal{C}}_{0101} \end{array}\right] .
-\] +\]" src="form_2478.png"/>

    -

    The mapping from the two Kelvin indices of the FullMatrix $\tilde{\cal{C}}$ to the rank-4 SymmetricTensor $\cal{C}$ can be inferred using the table shown above.

    -

    An important observation is that both the left-hand side tensor $\tilde{\mathbf{S}}$ and right-hand side tensor $\tilde{\mathbf{E}}$ have the same form; this is a property that is not present in Voigt notation. The various factors introduced into $\tilde{\mathbf{S}}$, $\tilde{\mathbf{E}}$ and $\tilde{\cal{C}}$ account for the symmetry of the tensors. The Kelvin description of their non-symmetric counterparts include no such factors.

    +

    The mapping from the two Kelvin indices of the FullMatrix $\tilde{\cal{C}}$ to the rank-4 SymmetricTensor $\cal{C}$ can be inferred using the table shown above.

    +

    An important observation is that both the left-hand side tensor $\tilde{\mathbf{S}}$ and right-hand side tensor $\tilde{\mathbf{E}}$ have the same form; this is a property that is not present in Voigt notation. The various factors introduced into $\tilde{\mathbf{S}}$, $\tilde{\mathbf{E}}$ and $\tilde{\cal{C}}$ account for the symmetry of the tensors. The Kelvin description of their non-symmetric counterparts include no such factors.

    Some useful references that show how this notation works include, amongst others,

    @article{Nagel2016,
    author = {Nagel, T. and G{\"o}rke, U-J. and Moerman, K. and Kolditz,
    O.},
    @@ -395,7 +395,7 @@

    Convert a rank-1 tensor to its compressed vector equivalent.

    -

    The output vector has $dim$ entries.

    +

    The output vector has $dim$ entries.

    @@ -500,7 +500,7 @@

    Convert a rank-1 tensor to its compressed matrix equivalent.

    -

    The output matrix will have $dim$ rows and one column.

    +

    The output matrix will have $dim$ rows and one column.

    @@ -521,7 +521,7 @@

    Convert a rank-2 tensor to its compressed matrix equivalent.

    -

    The output matrix will have $dim$ rows and $dim$ columns.

    +

    The output matrix will have $dim$ rows and $dim$ columns.

    @@ -542,7 +542,7 @@

    Convert a rank-2 symmetric tensor to its compressed matrix equivalent.

    -

    The output matrix will have $dim$ rows and $dim$ columns, with the same format as the equivalent function for non-symmetric tensors. This is because it is not possible to compress the SymmetricTensor<2,dim>::n_independent_components unique entries into a square matrix.

    +

    The output matrix will have $dim$ rows and $dim$ columns, with the same format as the equivalent function for non-symmetric tensors. This is because it is not possible to compress the SymmetricTensor<2,dim>::n_independent_components unique entries into a square matrix.

    @@ -578,7 +578,7 @@
    Definition: full_matrix.h:79
    Definition: tensor.h:516
    -

    the matrix mtrx_1 will have $dim \times dim$ rows and $dim$ columns (i.e. size Tensor<2,dim>::n_independent_components $\times$ Tensor<1,dim>::n_independent_components), while those of the matrix mtrx_2 will have $dim$ rows and $(dim \times dim + dim)/2$ columns (i.e. size Tensor<1,dim>::n_independent_components $\times$ SymmetricTensor<2,dim>::n_independent_components), as it is assumed that the entries corresponding to the alternation of the second and third indices are equal. That is to say that r3_symm_tnsr[i][j][k] == r3_symm_tnsr[i][k][j].

    +

    the matrix mtrx_1 will have $dim \times dim$ rows and $dim$ columns (i.e. size Tensor<2,dim>::n_independent_components $\times$ Tensor<1,dim>::n_independent_components), while those of the matrix mtrx_2 will have $dim$ rows and $(dim \times dim + dim)/2$ columns (i.e. size Tensor<1,dim>::n_independent_components $\times$ SymmetricTensor<2,dim>::n_independent_components), as it is assumed that the entries corresponding to the alternation of the second and third indices are equal. That is to say that r3_symm_tnsr[i][j][k] == r3_symm_tnsr[i][k][j].

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations.html differs (JavaScript source, ASCII text, with very long lines (1260)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations.html 2023-11-25 15:26:00.809871379 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations.html 2023-11-25 15:26:00.809871379 +0100 @@ -121,7 +121,7 @@ &#href_anchor"details" id="details">

    Detailed Description

    A collection of operations to assist in the transformation of tensor quantities from the reference to spatial configuration, and vice versa. These types of transformation are typically used to re-express quantities measured or computed in one configuration in terms of a second configuration.

    Notation

    -

    We will use the same notation for the coordinates $\mathbf{X}, \mathbf{x}$, transformations $\varphi$, differential operator $\nabla_{0}$ and deformation gradient $\mathbf{F}$ as discussed for namespace Physics::Elasticity.

    +

    We will use the same notation for the coordinates $\mathbf{X}, \mathbf{x}$, transformations $\varphi$, differential operator $\nabla_{0}$ and deformation gradient $\mathbf{F}$ as discussed for namespace Physics::Elasticity.

    As a further point on notation, we will follow Holzapfel (2007) and denote the push forward transformation as $\chi\left(\bullet\right)$ and the pull back transformation as $\chi^{-1}\left(\bullet\right)$. We will also use the annotation $\left(\bullet\right)^{\sharp}$ to indicate that a tensor $\left(\bullet\right)$ is a contravariant tensor, and $\left(\bullet\right)^{\flat}$ that it is covariant. In other words, these indices do not actually change the tensor, they just indicate the kind of object a particular tensor is.

    Note
    For these transformations, unless otherwise stated, we will strictly assume that all indices of the transformed tensors derive from one coordinate system; that is to say that they are not multi-point tensors (such as the Piola stress in elasticity).

    Function Documentation

    @@ -150,24 +150,24 @@
    -

    Return the result of applying Nanson's formula for the transformation of the material surface area element $d\mathbf{A}$ to the current surfaces area element $d\mathbf{a}$ under the nonlinear transformation map $\mathbf{x} = \boldsymbol{\varphi} \left( \mathbf{X} \right)$.

    +

    Return the result of applying Nanson's formula for the transformation of the material surface area element $d\mathbf{A}$ to the current surfaces area element $d\mathbf{a}$ under the nonlinear transformation map $\mathbf{x} = \boldsymbol{\varphi} \left( \mathbf{X} \right)$.

    The returned result is the spatial normal scaled by the ratio of areas between the reference and spatial surface elements, i.e.

    -\[
+<picture><source srcset=\[
  \mathbf{n} \frac{da}{dA}
  \dealcoloneq \textrm{det} \mathbf{F} \, \mathbf{F}^{-T} \cdot \mathbf{N}
  = \textrm{cof} \mathbf{F} \cdot \mathbf{N} \, .
-\] +\]" src="form_2503.png"/>

    Parameters
    - - + +
    [in]NThe referential normal unit vector $\mathbf{N}$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]NThe referential normal unit vector $\mathbf{N}$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    The scaled spatial normal vector $\mathbf{n}
-\frac{da}{dA}$
    +
    Returns
    The scaled spatial normal vector $\mathbf{n}
+\frac{da}{dA}$
    Note
    For a discussion of the background of this function, see G. A. Holzapfel: "Nonlinear solid mechanics. A Continuum Approach for Engineering" (2007), and in particular formula (2.55) on p. 75 (or thereabouts).
    For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.11) on p. 23 (or thereabouts).
    @@ -200,18 +200,18 @@

    Return a vector with a changed basis, i.e.

    -\[
+<picture><source srcset=\[
  \mathbf{V}^{\prime} \dealcoloneq \mathbf{B} \cdot \mathbf{V}
-\] +\]" src="form_2506.png"/>

    Parameters
    - - + +
    [in]VThe vector to be transformed $\mathbf{V}$
    [in]BThe transformation matrix $\mathbf{B}$
    [in]VThe vector to be transformed $\mathbf{V}$
    [in]BThe transformation matrix $\mathbf{B}$
    -
    Returns
    $\mathbf{V}^{\prime}$
    +
    Returns
    $\mathbf{V}^{\prime}$
    @@ -241,19 +241,19 @@

    Return a rank-2 tensor with a changed basis, i.e.

    -\[
+<picture><source srcset=\[
  \mathbf{T}^{\prime} \dealcoloneq \mathbf{B} \cdot \mathbf{T} \cdot
 \mathbf{B}^{T}
-\] +\]" src="form_2508.png"/>

    Parameters
    - - + +
    [in]TThe tensor to be transformed $\mathbf{T}$
    [in]BThe transformation matrix $\mathbf{B}$
    [in]TThe tensor to be transformed $\mathbf{T}$
    [in]BThe transformation matrix $\mathbf{B}$
    -
    Returns
    $\mathbf{T}^{\prime}$
    +
    Returns
    $\mathbf{T}^{\prime}$
    @@ -283,19 +283,19 @@

    Return a symmetric rank-2 tensor with a changed basis, i.e.

    -\[
+<picture><source srcset=\[
  \mathbf{T}^{\prime} \dealcoloneq \mathbf{B} \cdot \mathbf{T} \cdot
 \mathbf{B}^{T}
-\] +\]" src="form_2508.png"/>

    Parameters
    - - + +
    [in]TThe tensor to be transformed $\mathbf{T}$
    [in]BThe transformation matrix $\mathbf{B}$
    [in]TThe tensor to be transformed $\mathbf{T}$
    [in]BThe transformation matrix $\mathbf{B}$
    -
    Returns
    $\mathbf{T}^{\prime}$
    +
    Returns
    $\mathbf{T}^{\prime}$
    @@ -325,18 +325,18 @@

    Return a rank-4 tensor with a changed basis, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  H_{ijkl}^{\prime} \dealcoloneq B_{iI} B_{jJ} H_{IJKL} B_{kK} B_{lL}
-\] +\]" src="form_2510.png"/>

    Parameters
    - - + +
    [in]HThe tensor to be transformed $\mathbf{T}$
    [in]BThe transformation matrix $\mathbf{B}$
    [in]HThe tensor to be transformed $\mathbf{T}$
    [in]BThe transformation matrix $\mathbf{B}$
    -
    Returns
    $\mathbf{H}^{\prime}$
    +
    Returns
    $\mathbf{H}^{\prime}$
    @@ -366,18 +366,18 @@

    Return a symmetric rank-4 tensor with a changed basis, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  H_{ijkl}^{\prime} \dealcoloneq B_{iI} B_{jJ} H_{IJKL} B_{kK} B_{lL}
-\] +\]" src="form_2510.png"/>

    Parameters
    - - + +
    [in]HThe tensor to be transformed $\mathbf{T}$
    [in]BThe transformation matrix $\mathbf{B}$
    [in]HThe tensor to be transformed $\mathbf{T}$
    [in]BThe transformation matrix $\mathbf{B}$
    -
    Returns
    $\mathbf{H}^{\prime}$
    +
    Returns
    $\mathbf{H}^{\prime}$
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html differs (JavaScript source, ASCII text, with very long lines (587)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 2023-11-25 15:26:00.823204440 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 2023-11-25 15:26:00.823204440 +0100 @@ -118,16 +118,16 @@ &#href_anchor"memitem:af70b1a5907ac2a88ab2a053dfb055dbe">template<int dim, typename Number > SymmetricTensor< 4, dim, Number >&#href_anchor"memTemplItemRight" valign="bottom">pull_back (const SymmetricTensor< 4, dim, Number > &h, const Tensor< 2, dim, Number > &F) &#href_anchor"details" id="details">

    Detailed Description

    -

    Transformation of tensors that are defined in terms of a set of contravariant bases. Rank-1 and rank-2 contravariant tensors $\left(\bullet\right)^{\sharp} = \mathbf{T}$ (and its spatial counterpart $\mathbf{t}$) typically satisfy the relation

    -\[
+<div class=

    Transformation of tensors that are defined in terms of a set of contravariant bases. Rank-1 and rank-2 contravariant tensors $\left(\bullet\right)^{\sharp} = \mathbf{T}$ (and its spatial counterpart $\mathbf{t}$) typically satisfy the relation

    +\[
    \int_{V_{0}} \nabla_{0} \cdot \mathbf{T} \; dV
      = \int_{\partial V_{0}} \mathbf{T} \cdot \mathbf{N} \; dA
      = \int_{\partial V_{t}} \mathbf{T} \cdot \mathbf{n} \; da
      = \int_{V_{t}} \nabla \cdot \mathbf{t} \; dv
-\] +\]" src="form_2486.png"/>

    -

    where $V_{0}$ and $V_{t}$ are respectively control volumes in the reference and spatial configurations, and their surfaces $\partial
-V_{0}$ and $\partial V_{t}$ have the outwards facing normals $\mathbf{N}$ and $\mathbf{n}$.

    +

    where $V_{0}$ and $V_{t}$ are respectively control volumes in the reference and spatial configurations, and their surfaces $\partial
+V_{0}$ and $\partial V_{t}$ have the outwards facing normals $\mathbf{N}$ and $\mathbf{n}$.

    Function Documentation

    ◆ push_forward() [1/5]

    @@ -155,20 +155,20 @@

    Return the result of the push forward transformation on a contravariant vector, i.e.

    -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F} \cdot \left(\bullet\right)^{\sharp}
-\] +\]" src="form_2517.png"/>

    Parameters
    - +
    [in]VThe (referential) vector to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\chi\left( \mathbf{V} \right)$
    +
    Returns
    $\chi\left( \mathbf{V} \right)$
    @@ -198,21 +198,21 @@

    Return the result of the push forward transformation on a rank-2 contravariant tensor, i.e.

    -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F} \cdot \left(\bullet\right)^{\sharp} \cdot
 \mathbf{F}^{T}
-\] +\]" src="form_2519.png"/>

    Parameters
    - +
    [in]TThe (referential) rank-2 tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\chi\left( \mathbf{T} \right)$
    +
    Returns
    $\chi\left( \mathbf{T} \right)$
    @@ -242,21 +242,21 @@

    Return the result of the push forward transformation on a rank-2 contravariant symmetric tensor, i.e.

    -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F} \cdot \left(\bullet\right)^{\sharp} \cdot
 \mathbf{F}^{T}
-\] +\]" src="form_2519.png"/>

    Parameters
    - +
    [in]TThe (referential) rank-2 symmetric tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\chi\left( \mathbf{T} \right)$
    +
    Returns
    $\chi\left( \mathbf{T} \right)$
    @@ -286,21 +286,21 @@

    Return the result of the push forward transformation on a rank-4 contravariant tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \left[ \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq F_{iI} F_{jJ}
    \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
-\] +\]" src="form_2521.png"/>

    Parameters
    - +
    [in]HThe (referential) rank-4 tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\chi\left( \mathbf{H} \right)$
    +
    Returns
    $\chi\left( \mathbf{H} \right)$
    @@ -330,21 +330,21 @@

    Return the result of the push forward transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \left[ \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq F_{iI} F_{jJ}
    \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
-\] +\]" src="form_2521.png"/>

    Parameters
    - +
    [in]HThe (referential) rank-4 symmetric tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\chi\left( \mathbf{H} \right)$
    +
    Returns
    $\chi\left( \mathbf{H} \right)$
    @@ -374,20 +374,20 @@

    Return the result of the pull back transformation on a contravariant vector, i.e.

    -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F}^{-1} \cdot \left(\bullet\right)^{\sharp}
-\] +\]" src="form_2523.png"/>

    Parameters
    - +
    [in]vThe (spatial) vector to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\chi^{-1}\left( \mathbf{v} \right)$
    +
    Returns
    $\chi^{-1}\left( \mathbf{v} \right)$
    @@ -417,21 +417,21 @@

    Return the result of the pull back transformation on a rank-2 contravariant tensor, i.e.

    -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F}^{-1} \cdot \left(\bullet\right)^{\sharp}
    \cdot \mathbf{F}^{-T}
-\] +\]" src="form_2525.png"/>

    Parameters
    -
    [in]tThe (spatial) tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html differs (JavaScript source, ASCII text, with very long lines (1446))
--- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html	2023-11-25 15:26:00.833204237 +0100
+++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html	2023-11-25 15:26:00.833204237 +0100
@@ -118,16 +118,16 @@
 <tr class=&#href_anchor"memitem:a138fff54a44ba86bc2d1a6200b148e90">template<int dim, typename Number >
    SymmetricTensor< 4, dim, Number >&#href_anchor"memTemplItemRight" valign="bottom">pull_back (const SymmetricTensor< 4, dim, Number > &h, const Tensor< 2, dim, Number > &F)
    &#href_anchor"details" id="details">

    Detailed Description

    -

    Transformation of tensors that are defined in terms of a set of covariant basis vectors. Rank-1 and rank-2 covariant tensors $\left(\bullet\right)^{\flat} = \mathbf{T}$ (and its spatial counterpart $\mathbf{t}$) typically satisfy the relation

    -\[
+<div class=

    Transformation of tensors that are defined in terms of a set of covariant basis vectors. Rank-1 and rank-2 covariant tensors $\left(\bullet\right)^{\flat} = \mathbf{T}$ (and its spatial counterpart $\mathbf{t}$) typically satisfy the relation

    +\[
    \int_{\partial V_{0}} \left[ \nabla_{0} \times \mathbf{T} \right]
 \cdot \mathbf{N} \; dA = \oint_{\partial A_{0}} \mathbf{T} \cdot
 \mathbf{L} \; dL = \oint_{\partial A_{t}} \mathbf{t} \cdot \mathbf{l} \;
 dl = \int_{\partial V_{t}} \left[ \nabla \times \mathbf{t} \right] \cdot
 \mathbf{n} \; da
-\] +\]" src="form_2494.png"/>

    -

    where the control surfaces $\partial V_{0}$ and $\partial V_{t}$ with outwards facing normals $\mathbf{N}$ and $\mathbf{n}$ are bounded by the curves $\partial A_{0}$ and $\partial A_{t}$ that are, respectively, associated with the line directors $\mathbf{L}$ and $\mathbf{l}$.

    +

    where the control surfaces $\partial V_{0}$ and $\partial V_{t}$ with outwards facing normals $\mathbf{N}$ and $\mathbf{n}$ are bounded by the curves $\partial A_{0}$ and $\partial A_{t}$ that are, respectively, associated with the line directors $\mathbf{L}$ and $\mathbf{l}$.

    Function Documentation

    ◆ push_forward() [1/5]

    @@ -155,20 +155,20 @@

    Return the result of the push forward transformation on a covariant vector, i.e.

    -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
-\] +\]" src="form_2530.png"/>

    Parameters
    - +
    [in]VThe (referential) vector to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\chi\left( \mathbf{V} \right)$
    +
    Returns
    $\chi\left( \mathbf{V} \right)$
    @@ -198,21 +198,21 @@

    Return the result of the push forward transformation on a rank-2 covariant tensor, i.e.

    -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}^{-1}
-\] +\]" src="form_2531.png"/>

    Parameters
    - +
    [in]TThe (referential) rank-2 tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\chi\left( \mathbf{T} \right)$
    +
    Returns
    $\chi\left( \mathbf{T} \right)$
    @@ -242,21 +242,21 @@

    Return the result of the push forward transformation on a rank-2 covariant symmetric tensor, i.e.

    -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}^{-1}
-\] +\]" src="form_2531.png"/>

    Parameters
    - +
    [in]TThe (referential) rank-2 symmetric tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\chi\left( \mathbf{T} \right)$
    +
    Returns
    $\chi\left( \mathbf{T} \right)$
    @@ -286,21 +286,21 @@

    Return the result of the push forward transformation on a rank-4 covariant tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \left[ \chi\left(\bullet\right)^{\flat} \right]_{ijkl}
    \dealcoloneq F^{-T}_{iI} F^{-T}_{jJ}
    \left(\bullet\right)^{\flat}_{IJKL} F^{-T}_{kK} F^{-T}_{lL}
-\] +\]" src="form_2532.png"/>

    Parameters
    - +
    [in]HThe (referential) rank-4 tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\chi\left( \mathbf{H} \right)$
    +
    Returns
    $\chi\left( \mathbf{H} \right)$
    @@ -330,21 +330,21 @@

    Return the result of the push forward transformation on a rank-4 covariant symmetric tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \left[ \chi\left(\bullet\right)^{\flat} \right]_{ijkl}
    \dealcoloneq F^{-T}_{iI} F^{-T}_{jJ}
    \left(\bullet\right)^{\flat}_{IJKL} F^{-T}_{kK} F^{-T}_{lL}
-\] +\]" src="form_2532.png"/>

    Parameters
    - +
    [in]HThe (referential) rank-4 symmetric tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\chi\left( \mathbf{H} \right)$
    +
    Returns
    $\chi\left( \mathbf{H} \right)$
    @@ -374,20 +374,20 @@

    Return the result of the pull back transformation on a covariant vector, i.e.

    -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat}
-\] +\]" src="form_2533.png"/>

    Parameters
    - +
    [in]vThe (spatial) vector to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\chi^{-1}\left( \mathbf{v} \right)$
    +
    Returns
    $\chi^{-1}\left( \mathbf{v} \right)$
    @@ -417,21 +417,21 @@

    Return the result of the pull back transformation on a rank-2 covariant tensor, i.e.

    -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat} \cdot
 \mathbf{F}
-\] +\]" src="form_2534.png"/>

    Parameters
    - /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html differs (JavaScript source, ASCII text, with very long lines (551)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 2023-11-25 15:26:00.846537297 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 2023-11-25 15:26:00.846537297 +0100 @@ -146,22 +146,22 @@
    [in]tThe (spatial) tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$

    Return the result of the push forward transformation on a contravariant vector, i.e.

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
  \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
  \left(\bullet\right)^{\sharp}
-\] +\]" src="form_2537.png"/>

    Parameters
    - +
    [in]VThe (referential) vector to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{V} \right)$
    +
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{V} \right)$
    @@ -191,22 +191,22 @@

    Return the result of the push forward transformation on a rank-2 contravariant tensor, i.e.

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{T}
-\] +\]" src="form_2539.png"/>

    Parameters
    - +
    [in]TThe (referential) rank-2 tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{T} \right)$
    +
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{T} \right)$
    @@ -236,22 +236,22 @@

    Return the result of the push forward transformation on a rank-2 contravariant symmetric tensor, i.e.

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{T}
-\] +\]" src="form_2539.png"/>

    Parameters
    - +
    [in]TThe (referential) rank-2 symmetric tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{T} \right)$
    +
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{T} \right)$
    @@ -281,23 +281,23 @@

    Return the result of the push forward transformation on a rank-4 contravariant tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \left[
 \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; F_{iI} F_{jJ}
 \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
-\] +\]" src="form_2541.png"/>

    Parameters
    - +
    [in]HThe (referential) rank-4 tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{H} \right)$
    +
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{H} \right)$
    @@ -327,23 +327,23 @@

    Return the result of the push forward transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \left[
 \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; F_{iI} F_{jJ}
 \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
-\] +\]" src="form_2541.png"/>

    Parameters
    - +
    [in]HThe (referential) rank-4 symmetric tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{H} \right)$
    +
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{H} \right)$
    @@ -373,22 +373,22 @@

    Return the result of the pull back transformation on a contravariant vector, i.e.

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
 \left(\bullet\right)^{\sharp}
-\] +\]" src="form_2543.png"/>

    Parameters
    - +
    [in]vThe (spatial) vector to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    -
    Returns
    $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{v}
-\right)$
    +
    Returns
    $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{v}
+\right)$
    @@ -418,22 +418,22 @@

    Return the result of the pull back transformation on a rank-2 contravariant tensor, i.e.

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{-T}
-\] +\]" src="form_2545.png"/>

    Parameters
    - +
    [in]tThe (spatial) tensor to be operated on
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
-\mathbf{X} \right)$
    [in]FThe deformation gradient tensor $\mathbf{F} \left(
+\mathbf{X} \right)$
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html differs (JavaScript source, ASCII text, with very long lines (939)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html 2023-11-25 15:26:00.859870361 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html 2023-11-25 15:26:00.859870361 +0100 @@ -123,14 +123,14 @@

    Return the rotation matrix for 2-d Euclidean space, namely

    -\[
+<picture><source srcset=\[
  \mathbf{R} \dealcoloneq \left[ \begin{array}{cc}
  cos(\theta) & -sin(\theta) \\
  sin(\theta) & cos(\theta)
 \end{array}\right]
-\] +\]" src="form_2512.png"/>

    -

    where $\theta$ is the rotation angle given in radians. In particular, this describes the counter-clockwise rotation of a vector relative to a fixed set of right-handed axes.

    +

    where $\theta$ is the rotation angle given in radians. In particular, this describes the counter-clockwise rotation of a vector relative to a fixed set of right-handed axes.

    Parameters
    @@ -167,12 +167,12 @@
    [in]angleThe rotation angle (about the z-axis) in radians

    Return the rotation matrix for 3-d Euclidean space. Most concisely stated using the Rodrigues' rotation formula, this function returns the equivalent of

    -\[
+<picture><source srcset=\[
  \mathbf{R} \dealcoloneq cos(\theta)\mathbf{I} + sin(\theta)\mathbf{W}
              + (1-cos(\theta))\mathbf{u}\otimes\mathbf{u}
-\] +\]" src="form_2514.png"/>

    -

    where $\mathbf{u}$ is the axial vector (an axial vector) and $\theta$ is the rotation angle given in radians, $\mathbf{I}$ is the identity tensor and $\mathbf{W}$ is the skew symmetric tensor of $\mathbf{u}$.

    +

    where $\mathbf{u}$ is the axial vector (an axial vector) and $\theta$ is the rotation angle given in radians, $\mathbf{I}$ is the identity tensor and $\mathbf{W}$ is the skew symmetric tensor of $\mathbf{u}$.

    Note
    For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (9.194) on p. 374 (or thereabouts). This presents Rodrigues' rotation formula, but the implementation used in this function is described in this wikipedia link. In particular, this describes the counter-clockwise rotation of a vector in a plane with its normal. defined by the axis of rotation. An alternative implementation is discussed at this link, but is inconsistent (sign-wise) with the Rodrigues' rotation formula as it describes the rotation of a coordinate system.
    Parameters
    @@ -213,12 +213,12 @@

    Return the rotation matrix for 3-d Euclidean space. Most concisely stated using the Rodrigues' rotation formula, this function returns the equivalent of

    -\[
+<picture><source srcset=\[
  \mathbf{R} \dealcoloneq cos(\theta)\mathbf{I} + sin(\theta)\mathbf{W}
              + (1-cos(\theta))\mathbf{u}\otimes\mathbf{u}
-\] +\]" src="form_2514.png"/>

    -

    where $\mathbf{u}$ is the axial vector (an axial vector) and $\theta$ is the rotation angle given in radians, $\mathbf{I}$ is the identity tensor and $\mathbf{W}$ is the skew symmetric tensor of $\mathbf{u}$.

    +

    where $\mathbf{u}$ is the axial vector (an axial vector) and $\theta$ is the rotation angle given in radians, $\mathbf{I}$ is the identity tensor and $\mathbf{W}$ is the skew symmetric tensor of $\mathbf{u}$.

    Note
    For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (9.194) on p. 374 (or thereabouts). This presents Rodrigues' rotation formula, but the implementation used in this function is described in this wikipedia link. In particular, this describes the counter-clockwise rotation of a vector in a plane with its normal. defined by the axis of rotation. An alternative implementation is discussed at this link, but is inconsistent (sign-wise) with the Rodrigues' rotation formula as it describes the rotation of a coordinate system.
    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html differs (JavaScript source, ASCII text, with very long lines (545)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 2023-11-25 15:26:00.869870158 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 2023-11-25 15:26:00.869870158 +0100 @@ -129,11 +129,11 @@
    -

    Calculate the angle $\theta$ between two vectors a and b. The returned angle will be in the range $[0, \pi]$.

    +

    Calculate the angle $\theta$ between two vectors a and b. The returned angle will be in the range $[0, \pi]$.

    This function uses the geometric definition of the scalar product.

    -\[
+<picture><source srcset=\[
   \vec{a} \cdot \vec{b} = \|\vec{a}\| \|\vec{b}\| \cos(\theta)
-\] +\]" src="form_2550.png"/>

    @@ -168,21 +168,21 @@
    -

    Calculate the angle $\theta$ between two vectors a and b, where both vectors are located in a plane described by a normal vector axis.

    -

    The angle computed by this function corresponds to the rotation angle that would transform the vector a into the vector b around the vector axis. Thus, contrary to the function above, we get a signed angle which will be in the range $[-\pi, \pi]$.

    +

    Calculate the angle $\theta$ between two vectors a and b, where both vectors are located in a plane described by a normal vector axis.

    +

    The angle computed by this function corresponds to the rotation angle that would transform the vector a into the vector b around the vector axis. Thus, contrary to the function above, we get a signed angle which will be in the range $[-\pi, \pi]$.

    The vector axis needs to be a unit vector and be perpendicular to both vectors a and b.

    This function uses the geometric definitions of both the scalar and cross product.

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \vec{a} \cdot  \vec{b} &= \|\vec{a}\| \|\vec{b}\| \cos(\theta) \\
   \vec{a} \times \vec{b} &= \|\vec{a}\| \|\vec{b}\| \sin(\theta) \vec{n}
-\end{align*} +\end{align*}" src="form_2552.png"/>

    We can create the tangent of the angle using both products.

    -\[
+<picture><source srcset=\[
   \tan{\theta}
   = \frac{\sin(\theta)}{\cos(theta)}
   = \frac{(\vec{a} \times \vec{b}) \cdot \vec{n}}{\vec{a} \cdot \vec{b}}
-\] +\]" src="form_2553.png"/>

    Note
    Only applicable for three-dimensional vectors spacedim == 3.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSLEPcWrappers.html differs (JavaScript source, ASCII text, with very long lines (1351)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSLEPcWrappers.html 2023-11-25 15:26:00.879869954 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSLEPcWrappers.html 2023-11-25 15:26:00.879869954 +0100 @@ -108,13 +108,13 @@ &#href_anchor"memitem:">class  TransformationSpectrumFolding &#href_anchor"details" id="details">

    Detailed Description

    Base namespace for solver classes using the SLEPc solvers which are selected based on flags passed to the eigenvalue problem solver context. Derived classes set the right flags to set the right solver.

    -

    The SLEPc solvers are intended to be used for solving the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively. The emphasis is on methods and techniques appropriate for problems in which the associated matrices are sparse. Most of the methods offered by the SLEPc library are projection methods or other methods with similar properties; and wrappers are provided to interface to SLEPc solvers that handle both of these problem sets.

    +

    The SLEPc solvers are intended to be used for solving the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively. The emphasis is on methods and techniques appropriate for problems in which the associated matrices are sparse. Most of the methods offered by the SLEPc library are projection methods or other methods with similar properties; and wrappers are provided to interface to SLEPc solvers that handle both of these problem sets.

    SLEPcWrappers can be implemented in application codes in the following way:

    SolverControl solver_control (1000, 1e-9);
    SolverArnoldi system (solver_control, mpi_communicator);
    system.solve (A, B, lambda, x, size_of_spectrum);
    -

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable const unsigned int size_of_spectrum tells SLEPc the number of eigenvector/eigenvalue pairs to solve for. Additional options and solver parameters can be passed to the SLEPc solvers before calling solve(). For example, if the matrices of the general eigenspectrum problem are not hermitian and the lower eigenvalues are wanted only, the following code can be implemented before calling solve():

    system.set_problem_type (EPS_NHEP);
    +

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable const unsigned int size_of_spectrum tells SLEPc the number of eigenvector/eigenvalue pairs to solve for. Additional options and solver parameters can be passed to the SLEPc solvers before calling solve(). For example, if the matrices of the general eigenspectrum problem are not hermitian and the lower eigenvalues are wanted only, the following code can be implemented before calling solve():

    system.set_problem_type (EPS_NHEP);
    system.set_which_eigenpairs (EPS_SMALLEST_REAL);

    These options can also be set at the command line.

    See also step-36 for a hands-on example.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSUNDIALS.html differs (JavaScript source, ASCII text, with very long lines (1159)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSUNDIALS.html 2023-11-25 15:26:00.893203014 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSUNDIALS.html 2023-11-25 15:26:00.893203014 +0100 @@ -133,7 +133,7 @@

    Type of function objects to interface with SUNDIALS' linear solvers

    -

    This function type encapsulates the action of solving $P^{-1}Ax=P^{-1}b$. The LinearOperator op encapsulates the matrix vector product $Ax$ and the LinearOperator prec encapsulates the application of the preconditioner $P^{-1}z$. The user can specify function objects of this type to attach custom linear solver routines to SUNDIALS. The two LinearOperators op and prec are built internally by SUNDIALS based on user settings. The parameters are interpreted as follows:

    +

    This function type encapsulates the action of solving $P^{-1}Ax=P^{-1}b$. The LinearOperator op encapsulates the matrix vector product $Ax$ and the LinearOperator prec encapsulates the application of the preconditioner $P^{-1}z$. The user can specify function objects of this type to attach custom linear solver routines to SUNDIALS. The two LinearOperators op and prec are built internally by SUNDIALS based on user settings. The parameters are interpreted as follows:

    Parameters
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html differs (JavaScript source, ASCII text, with very long lines (1257)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 2023-11-25 15:26:00.906536078 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 2023-11-25 15:26:00.903202810 +0100 @@ -107,19 +107,19 @@
    [in]opA LinearOperator that applies the matrix vector product

    Detailed Description

    Smoothness estimation strategy based on the decay of Fourier expansion coefficients.

    -

    From the definition, we can write our Fourier series expansion $a_{\bf k}$ of the finite element solution on cell $K$ with polynomial degree $p$ as a matrix product

    -\begin{eqnarray*}
+<p>From the definition, we can write our <a class=Fourier series expansion $a_{\bf k}$ of the finite element solution on cell $K$ with polynomial degree $p$ as a matrix product

    +\begin{eqnarray*}
    u_h({\bf x}) &=& \sum_j u_j \varphi_j ({\bf x}) \\
    u_{h, {\bf k}}({\bf x}) &=&
      \sum_{{\bf k}, \|{\bf k}\|\le p} a_{\bf k} \phi_{\bf k}({\bf x}),
      \quad a_{\bf k} = \sum_j {\cal F}_{{\bf k},j} u_j
-\end{eqnarray*} +\end{eqnarray*}" src="form_2228.png"/>

    -

    with $u_j$ the degrees of freedom and $\varphi_j$ the corresponding shape functions. $\{\phi_{\bf k}({\bf x}) = \exp(i \, 2 \pi \, {\bf k} \cdot
-{\bf x}) \}$ are exponential functions on cell $K$. $a_{\bf k}$ and ${\cal
-F}_{{\bf k},j}$ are coefficients and transformation matrices from the Fourier expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if mapping from the reference cell to the actual cell is linear. We use the class FESeries::Fourier to determine all coefficients $a_{\bf k}$.

    -

    If the finite element approximation on cell $K$ is part of the Hilbert space $H^s(K)$, then the following integral must exist for both the finite element and spectral representation of our solution

    -\begin{eqnarray*}
+<p> with <picture><source srcset=$u_j$ the degrees of freedom and $\varphi_j$ the corresponding shape functions. $\{\phi_{\bf k}({\bf x}) = \exp(i \, 2 \pi \, {\bf k} \cdot
+{\bf x}) \}$ are exponential functions on cell $K$. $a_{\bf k}$ and ${\cal
+F}_{{\bf k},j}$ are coefficients and transformation matrices from the Fourier expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if mapping from the reference cell to the actual cell is linear. We use the class FESeries::Fourier to determine all coefficients $a_{\bf k}$.

    +

    If the finite element approximation on cell $K$ is part of the Hilbert space $H^s(K)$, then the following integral must exist for both the finite element and spectral representation of our solution

    +\begin{eqnarray*}
   \| \nabla^s u_h({\bf x}) \|_{L^2(K)}^2 &=&
     \int\limits_K \left| \nabla^s u_h({\bf x}) \right|^2 d{\bf x} <
     \infty \\
@@ -128,40 +128,40 @@
     a_{\bf k} \, \phi_{\bf k}({\bf x}) \right|^2 d{\bf x} =
     (2 \pi)^{2s} \sum\limits_{\bf k} \left| a_{\bf k} \right|^2
     \|{\bf k}\|_2^{2s} < \infty
-\end{eqnarray*} +\end{eqnarray*}" src="form_2232.png"/>

    The sum is finite only if the summands decay at least with order

    -\[
+<picture><source srcset=\[
   |a_{\bf k}|^2 \|{\bf k}\|_2^{2s} \|{\bf k}\|_2^{d - 1} =
     {\cal O}\left( \|{\bf k}\|_2^{-1-\epsilon} \right)
-\] +\]" src="form_2233.png"/>

    -

    for all $\epsilon > 0$. The additional factor stems from the fact that, since we sum over all multi-indices ${\bf k}$ that are located on a dim-dimensional sphere, we actually have, up to a constant, $\|{\bf k}\|_2^{d-1}$ modes located in each increment $\|{\bf k}\|_2 +
-d\|{\bf k}\|_2$ that need to be taken into account. By a comparison of exponents, we can rewrite this condition as

    -\[
+<p> for all <picture><source srcset=$\epsilon > 0$. The additional factor stems from the fact that, since we sum over all multi-indices ${\bf k}$ that are located on a dim-dimensional sphere, we actually have, up to a constant, $\|{\bf k}\|_2^{d-1}$ modes located in each increment $\|{\bf k}\|_2 +
+d\|{\bf k}\|_2$ that need to be taken into account. By a comparison of exponents, we can rewrite this condition as

    +\[
   |a_{\bf k}| = {\cal O}\left(\|{\bf k}\|_2^
     {-\left(s + \frac d2 + \epsilon \right)} \right)
-\] +\]" src="form_2238.png"/>

    -

    The next step is to estimate how fast these coefficients decay with $\|{\bf k}\|_2$. Thus, we perform a least-squares fit

    -\[
+<p>The next step is to estimate how fast these coefficients decay with <picture><source srcset=$\|{\bf k}\|_2$. Thus, we perform a least-squares fit

    +\[
    \min_{\alpha,\sigma}
    \frac 12 \sum_{{\bf k}, \|{\bf k}\|_2 \le p}
    \left( |a_{\bf k}| - \alpha \|{\bf k}\|_2^{-\sigma}\right)^2
-\] +\]" src="form_2240.png"/>

    -

    with regression coefficients $\alpha$ and $\sigma$. For simplification, we apply a logarithm on our minimization problem

    -\[
+<p> with regression coefficients <picture><source srcset=$\alpha$ and $\sigma$. For simplification, we apply a logarithm on our minimization problem

    +\[
    \min_{\beta,\sigma}
    Q(\beta,\sigma) =
    \frac 12 \sum_{{\bf k}, \|{\bf k}\|_2 \le p}
    \left( \ln |a_{\bf k}| - \beta + \sigma \ln \|{\bf k}\|_2
 \right)^2,
-\] +\]" src="form_2241.png"/>

    -

    where $\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0,
-\frac{\partial Q}{\partial\sigma}=0$, are linear in $\beta,\sigma$. We can write these conditions as follows:

    -\[
+<p> where <picture><source srcset=$\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0,
+\frac{\partial Q}{\partial\sigma}=0$, are linear in $\beta,\sigma$. We can write these conditions as follows:

    +\[
    \left(\begin{array}{cc}
    \sum_{{\bf k}, \|{\bf k}\|_2 \le p} 1 &
    \sum_{{\bf k}, \|{\bf k}\|_2 \le p} \ln \|{\bf k}\|_2
@@ -178,10 +178,10 @@
    \\
    \sum_{{\bf k}, \|{\bf k}\|_2\le p} \ln |a_{{\bf k}}| \ln \|{\bf
 k}\|_2 \end{array}\right)
-\] +\]" src="form_2245.png"/>

    -

    Solving for $\beta$ and $\sigma$ is just a linear regression fit and to do that we will use FESeries::linear_regression().

    -

    While we are not particularly interested in the actual value of $\beta$, the formula above gives us a means to calculate the value of the exponent $\sigma$ that we can then use to determine that $u(\hat{\bf x})$ is in $H^s(K)$ with $s=\sigma-\frac d2$. The decay rates $\sigma$ will suffice as our smoothness indicators and will be calculated on each cell for any provided solution.

    +

    Solving for $\beta$ and $\sigma$ is just a linear regression fit and to do that we will use FESeries::linear_regression().

    +

    While we are not particularly interested in the actual value of $\beta$, the formula above gives us a means to calculate the value of the exponent $\sigma$ that we can then use to determine that $u(\hat{\bf x})$ is in $H^s(K)$ with $s=\sigma-\frac d2$. The decay rates $\sigma$ will suffice as our smoothness indicators and will be calculated on each cell for any provided solution.

    Note
    An extensive demonstration of the use of these functions is provided in step-27.

    Function Documentation

    @@ -237,17 +237,17 @@
    -

    In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors $\bf k$ describing Fourier polynomials $P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|\bf k\|_2$, we take the maximum among those. Thus, the least-squares fit is performed on

    -\[
+<p>In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors <picture><source srcset=$\bf k$ describing Fourier polynomials $P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|\bf k\|_2$, we take the maximum among those. Thus, the least-squares fit is performed on

    +\[
   \ln \left( \max\limits_{\|{\bf k}\|_2} |a_{\bf k}| \right) \sim
     C - \sigma \ln \|{\bf k}\|_2
-\] +\]" src="form_2259.png"/>

    -

    for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element. We exclude the $\|{\bf k}\|_2=0$ modes to avoid the singularity of the logarithm.

    -

    The regression_strategy parameter determines which norm will be used on the subset of coefficients $\bf k$ with the same absolute value $\|{\bf k}\|_2$. Default is VectorTools::Linfty_norm for a maximum approximation.

    -

    For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

    +

    for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element. We exclude the $\|{\bf k}\|_2=0$ modes to avoid the singularity of the logarithm.

    +

    The regression_strategy parameter determines which norm will be used on the subset of coefficients $\bf k$ with the same absolute value $\|{\bf k}\|_2$. Default is VectorTools::Linfty_norm for a maximum approximation.

    +

    For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

    A series expansion object fe_fourier has to be supplied, which needs to be constructed with the same FECollection object as the dof_handler.

    -

    The parameter smallest_abs_coefficient allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

    +

    The parameter smallest_abs_coefficient allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

    Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. The parameter only_flagged_cells controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.

    Definition at line 370 of file smoothness_estimator.cc.

    @@ -305,11 +305,11 @@
    -

    In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

    -

    The coefficients_predicate parameter selects Fourier coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Fourier coefficients in each coordinate direction, i.e., set all the elements of the vector to true.

    -

    For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

    +

    In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

    +

    The coefficients_predicate parameter selects Fourier coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Fourier coefficients in each coordinate direction, i.e., set all the elements of the vector to true.

    +

    For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

    A series expansion object fe_fourier has to be supplied, which needs to be constructed with the same FECollection object as the dof_handler.

    -

    The parameter smallest_abs_coefficient allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

    +

    The parameter smallest_abs_coefficient allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

    Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. The parameter only_flagged_cells controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.

    Definition at line 468 of file smoothness_estimator.cc.

    @@ -342,7 +342,7 @@

    Returns a FESeries::Fourier object for Fourier series expansions with the default configuration for smoothness estimation purposes.

    -

    For each finite element of the provided fe_collection, we use as many modes as its polynomial degree plus two. Further for each element, we use a 5-point Gaussian quarature iterated in each dimension by the maximal wave number, which is the number of modes decreased by one since we start with $k = 0$.

    +

    For each finite element of the provided fe_collection, we use as many modes as its polynomial degree plus two. Further for each element, we use a 5-point Gaussian quarature iterated in each dimension by the maximal wave number, which is the number of modes decreased by one since we start with $k = 0$.

    As the Fourier expansion can only be performed on scalar fields, this class does not operate on vector-valued finite elements and will therefore throw an assertion. However, each component of a finite element field can be treated as a scalar field, respectively, on which Fourier expansions are again possible. For this purpose, the optional parameter component defines which component of each FiniteElement will be used. The default value of component only applies to scalar FEs, in which case it indicates that the sole component is to be decomposed. For vector-valued FEs, a non-default value must be explicitly provided.

    Definition at line 577 of file smoothness_estimator.cc.

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html differs (JavaScript source, ASCII text, with very long lines (2010)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 2023-11-25 15:26:00.919869140 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 2023-11-25 15:26:00.919869140 +0100 @@ -107,25 +107,25 @@

    Detailed Description

    Smoothness estimation strategy based on the decay of Legendre expansion coefficients.

    -

    In one dimension, the finite element solution on cell $K$ with polynomial degree $p$ can be written as

    -\begin{eqnarray*}
+<p>In one dimension, the finite element solution on cell <picture><source srcset=$K$ with polynomial degree $p$ can be written as

    +\begin{eqnarray*}
    u_h(x) &=& \sum_j u_j \varphi_j (x) \\
    u_{h, k}(x) &=& \sum_{k=0}^{p} a_k \widetilde P_k (x),
    \quad a_k = \sum_j {\cal L}_{k,j} u_j
-\end{eqnarray*} +\end{eqnarray*}" src="form_2217.png"/>

    -

    where $u_j$ are degrees of freedom and $\varphi_j$ are the corresponding shape functions. $\{\widetilde P_k(x)\}$ are Legendre polynomials on cell $K$. $a_k$ and ${\cal L}_{k,j}$ are coefficients and transformation matrices from the Legendre expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if the mapping from the reference cell to the actual cell is affine. We use the class FESeries::Legendre to determine all coefficients $a_k$.

    +

    where $u_j$ are degrees of freedom and $\varphi_j$ are the corresponding shape functions. $\{\widetilde P_k(x)\}$ are Legendre polynomials on cell $K$. $a_k$ and ${\cal L}_{k,j}$ are coefficients and transformation matrices from the Legendre expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if the mapping from the reference cell to the actual cell is affine. We use the class FESeries::Legendre to determine all coefficients $a_k$.

    A function is analytic, i.e., representable by a power series, if and only if their Legendre expansion coefficients decay as (see [eibner2007hp])

    -\[
+<picture><source srcset=\[
   |a_k| \sim c \, \exp(-\sigma k)
-\] +\]" src="form_2222.png"/>

    -

    We determine their decay rate $\sigma$ by performing the linear regression fit of

    -\[
+<p> We determine their decay rate <picture><source srcset=$\sigma$ by performing the linear regression fit of

    +\[
   \ln |a_k| \sim C - \sigma k
-\] +\]" src="form_2224.png"/>

    -

    for $k=0,\ldots,p$, with $p$ the polynomial degree of the finite element. The rate of the decay $\sigma$ can be used to estimate the smoothness. For example, one strategy to implement hp-refinement criteria is to perform p-refinement if $\sigma>1$ (see [mavriplis1994hp]).

    +

    for $k=0,\ldots,p$, with $p$ the polynomial degree of the finite element. The rate of the decay $\sigma$ can be used to estimate the smoothness. For example, one strategy to implement hp-refinement criteria is to perform p-refinement if $\sigma>1$ (see [mavriplis1994hp]).

    Function Documentation

    ◆ coefficient_decay()

    @@ -180,24 +180,24 @@
    -

    In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors $\bf k$ describing Legendre polynomials $\widetilde P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|{\bf k}\|_1$, we take the maximum among those. Thus, the least-squares fit is performed on

    -\begin{eqnarray*}
+<p>In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors <picture><source srcset=$\bf k$ describing Legendre polynomials $\widetilde P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|{\bf k}\|_1$, we take the maximum among those. Thus, the least-squares fit is performed on

    +\begin{eqnarray*}
   \widetilde P_{\bf k}({\bf x}) &=&
     \widetilde P_{k_1} (x_1) \ldots \widetilde P_{k_d} (x_d) \\
   \ln \left( \max\limits_{\|{\bf k}\|_1} |a_{\bf k}| \right) &\sim&
     C - \sigma \|{\bf k}\|_1
-\end{eqnarray*} +\end{eqnarray*}" src="form_2251.png"/>

    -

    for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element.

    +

    for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element.

    For a finite element approximation solution, this function writes the decay rate for every cell into the output vector smoothness_indicators.

    Parameters
    - + - - + +
    [in]fe_legendreFESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction for every finite element in the collection, where $p$ is its polynomial degree.
    [in]fe_legendreFESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction for every finite element in the collection, where $p$ is its polynomial degree.
    [in]dof_handlerA DoFHandler.
    [in]solutionA solution vector.
    [out]smoothness_indicatorsA vector for smoothness indicators.
    [in]regression_strategyDetermines which norm will be used on the subset of coefficients $\mathbf{k}$ with the same absolute value $\|{\bf k}\|_1$. Default is VectorTools::Linfty_norm for a maximum approximation.
    [in]smallest_abs_coefficientThe smallest absolute value of the coefficient to be used in linear regression. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameters allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients, the returned value for this cell will be $\sigma=\infty$.
    [in]regression_strategyDetermines which norm will be used on the subset of coefficients $\mathbf{k}$ with the same absolute value $\|{\bf k}\|_1$. Default is VectorTools::Linfty_norm for a maximum approximation.
    [in]smallest_abs_coefficientThe smallest absolute value of the coefficient to be used in linear regression. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameters allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients, the returned value for this cell will be $\sigma=\infty$.
    [in]only_flagged_cellsSmoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. This parameter controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.
    @@ -259,16 +259,16 @@
    -

    In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

    +

    In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

    For a finite element approximation solution, this function writes the decay rate for every cell into the output vector smoothness_indicators.

    Parameters
    - + - - + +
    [in]fe_legendreFESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction, where $p$ is the maximum polynomial degree to be used.
    [in]fe_legendreFESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction, where $p$ is the maximum polynomial degree to be used.
    [in]dof_handlerA DoFHandler
    [in]solutionA solution vector
    [out]smoothness_indicatorsA vector for smoothness indicators
    [in]coefficients_predicateA predicate to select Legendre coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Legendre coefficients in each coordinate direction, i.e. set all elements of the vector to true.
    [in]smallest_abs_coefficientThe smallest absolute value of the coefficient to be used in linear regression in each coordinate direction. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameters allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.
    [in]coefficients_predicateA predicate to select Legendre coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Legendre coefficients in each coordinate direction, i.e. set all elements of the vector to true.
    [in]smallest_abs_coefficientThe smallest absolute value of the coefficient to be used in linear regression in each coordinate direction. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameters allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.
    [in]only_flagged_cellsSmoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. This parameter controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to NaN.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSparseMatrixTools.html differs (JavaScript source, ASCII text, with very long lines (622)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSparseMatrixTools.html 2023-11-25 15:26:00.929868936 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceSparseMatrixTools.html 2023-11-25 15:26:00.929868936 +0100 @@ -149,18 +149,18 @@

    Given a sparse matrix (system_matrix, sparsity_pattern), construct a new sparse matrix (system_matrix_out, sparsity_pattern_out) by restriction

    -\[
+<picture><source srcset=\[
  A_i = R_i A R_i^T,
-\] +\]" src="form_1926.png"/>

    -

    where the Boolean matrix $R_i$ is defined by the entries of requested_is.

    -

    The function can be called by multiple processes with different sets of indices, allowing to assign each process a different $A_i$.

    +

    where the Boolean matrix $R_i$ is defined by the entries of requested_is.

    +

    The function can be called by multiple processes with different sets of indices, allowing to assign each process a different $A_i$.

    Such a function is useful to implement Schwarz methods, where operations of type

    -\[
+<picture><source srcset=\[
  u^{n} = u^{n-1} + \sum_{i} R_i^T A_i^{-1} R_i (f - A u^{n-1})
-\] +\]" src="form_1928.png"/>

    -

    are performed to iteratively solve a system of type $Au=f$.

    +

    are performed to iteratively solve a system of type $Au=f$.

    Warning
    This is a collective call that needs to be executed by all processes in the communicator of sparse_matrix_in.
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceTensorAccessors.html differs (JavaScript source, ASCII text, with very long lines (489)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceTensorAccessors.html 2023-11-25 15:26:00.939868733 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceTensorAccessors.html 2023-11-25 15:26:00.939868733 +0100 @@ -175,7 +175,7 @@
    Note
    This function returns an internal class object consisting of an array subscript operator operator[](unsigned int) and an alias value_type describing its return value.
    Template Parameters
    - +
    indexThe index to be shifted to the end. Indices are counted from 0, thus the valid range is $0\le\text{index}<\text{rank}$.
    indexThe index to be shifted to the end. Indices are counted from 0, thus the valid range is $0\le\text{index}<\text{rank}$.
    rankRank of the tensorial object t
    TA tensorial object of rank rank. T must provide a local alias value_type and an index operator operator[]() that returns a (const or non-const) reference of value_type.
    @@ -280,12 +280,12 @@

    This function contracts two tensorial objects left and right and stores the result in result. The contraction is done over the last no_contr indices of both tensorial objects:

    -\[
+<picture><source srcset=\[
   \text{result}_{i_1,..,i_{r1},j_1,..,j_{r2}}
   = \sum_{k_1,..,k_{\mathrm{no\_contr}}}
     \mathrm{left}_{i_1,..,i_{r1},k_1,..,k_{\mathrm{no\_contr}}}
     \mathrm{right}_{j_1,..,j_{r2},k_1,..,k_{\mathrm{no\_contr}}}
-\] +\]" src="form_865.png"/>

    Calling this function is equivalent of writing the following low level code:

    for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
    ...
    @@ -351,12 +351,12 @@

    Full contraction of three tensorial objects:

    -\[
+<picture><source srcset=\[
   \sum_{i_1,..,i_{r1},j_1,..,j_{r2}}
   \text{left}_{i_1,..,i_{r1}}
   \text{middle}_{i_1,..,i_{r1},j_1,..,j_{r2}}
   \text{right}_{j_1,..,j_{r2}}
-\] +\]" src="form_866.png"/>

    Calling this function is equivalent of writing the following low level code:

    T1 result = T1();
    for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html differs (JavaScript source, ASCII text, with very long lines (2082)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 2023-11-25 15:26:00.953201793 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 2023-11-25 15:26:00.953201793 +0100 @@ -139,8 +139,8 @@

    Return the elements of a continuous Givens rotation matrix and the norm of the input vector.

    -

    That is for a given pair x and y, return $c$ , $s$ and $\sqrt{x^2+y^2}$ such that

    -\[
+<p>That is for a given pair <code>x</code> and <code>y</code>, return <picture><source srcset=$c$ , $s$ and $\sqrt{x^2+y^2}$ such that

    +\[
 \begin{bmatrix}
 c  & s \\
 -s & c
@@ -154,7 +154,7 @@
 \sqrt{x^2+y^2} \\
 0
 \end{bmatrix}
-\] +\]" src="form_1964.png"/>

    Note
    The function is implemented for real valued numbers only.
    @@ -188,8 +188,8 @@

    Return the elements of a hyperbolic rotation matrix.

    -

    That is for a given pair x and y, return $c$ , $s$ and $r$ such that

    -\[
+<p>That is for a given pair <code>x</code> and <code>y</code>, return <picture><source srcset=$c$ , $s$ and $r$ such that

    +\[
 \begin{bmatrix}
 c  & -s \\
 -s & c
@@ -203,9 +203,9 @@
 r \\
 0
 \end{bmatrix}
-\] +\]" src="form_1965.png"/>

    -

    Real valued solution only exists if $|x|>|g|$, the function will throw an error otherwise.

    +

    Real valued solution only exists if $|x|>|g|$, the function will throw an error otherwise.

    Note
    The function is implemented for real valued numbers only.
    @@ -253,7 +253,7 @@
    -

    Estimate an upper bound for the largest eigenvalue of H by a k -step Lanczos process starting from the initial vector v0. Typical values of k are below 10. This estimator computes a k-step Lanczos decomposition $H V_k=V_k T_k+f_k e_k^T$ where $V_k$ contains k Lanczos basis, $V_k^TV_k=I_k$, $T_k$ is the tridiagonal Lanczos matrix, $f_k$ is a residual vector $f_k^TV_k=0$, and $e_k$ is the k-th canonical basis of $R^k$. The returned value is $ ||T_k||_2 + ||f_k||_2$. If eigenvalues is not nullptr, the eigenvalues of $T_k$ will be written there.

    +

    Estimate an upper bound for the largest eigenvalue of H by a k -step Lanczos process starting from the initial vector v0. Typical values of k are below 10. This estimator computes a k-step Lanczos decomposition $H V_k=V_k T_k+f_k e_k^T$ where $V_k$ contains k Lanczos basis, $V_k^TV_k=I_k$, $T_k$ is the tridiagonal Lanczos matrix, $f_k$ is a residual vector $f_k^TV_k=0$, and $e_k$ is the k-th canonical basis of $R^k$. The returned value is $ ||T_k||_2 + ||f_k||_2$. If eigenvalues is not nullptr, the eigenvalues of $T_k$ will be written there.

    vector_memory is used to allocate memory for temporary vectors. OperatorType has to provide vmult operation with VectorType.

    This function implements the algorithm from

    @article{Zhou2006,
    Title = {Self-consistent-field Calculations Using Chebyshev-filtered
    @@ -265,7 +265,7 @@
    Volume = {219},
    Pages = {172--184},
    }
    -
    Note
    This function uses Lapack routines to compute the largest eigenvalue of $T_k$.
    +
    Note
    This function uses Lapack routines to compute the largest eigenvalue of $T_k$.
    This function provides an alternate estimate to that obtained from several steps of SolverCG with SolverCG<VectorType>::connect_eigenvalues_slot().
    @@ -320,19 +320,19 @@
    -

    Apply Chebyshev polynomial of the operator H to x. For a non-defective operator $H$ with a complete set of eigenpairs $H \psi_i = \lambda_i \psi_i$, the action of a polynomial filter $p$ is given by $p(H)x =\sum_i a_i p(\lambda_i) \psi_i$, where $x=: \sum_i a_i
-\psi_i$. Thus by appropriately choosing the polynomial filter, one can alter the eigenmodes contained in $x$.

    -

    This function uses Chebyshev polynomials of first kind. Below is an example of polynomial $T_n(x)$ of degree $n=8$ normalized to unity at $-1.2$.

    +

    Apply Chebyshev polynomial of the operator H to x. For a non-defective operator $H$ with a complete set of eigenpairs $H \psi_i = \lambda_i \psi_i$, the action of a polynomial filter $p$ is given by $p(H)x =\sum_i a_i p(\lambda_i) \psi_i$, where $x=: \sum_i a_i
+\psi_i$. Thus by appropriately choosing the polynomial filter, one can alter the eigenmodes contained in $x$.

    +

    This function uses Chebyshev polynomials of first kind. Below is an example of polynomial $T_n(x)$ of degree $n=8$ normalized to unity at $-1.2$.

    -

    By introducing a linear mapping $L$ from unwanted_spectrum to $[-1,1]$, we can dump the corresponding modes in x. The higher the polynomial degree $n$, the more rapid it grows outside of the $[-1,1]$. In order to avoid numerical overflow, we normalize polynomial filter to unity at tau. Thus, the filtered operator is $p(H) = T_n(L(H))/T_n(L(\tau))$.

    -

    The action of the Chebyshev filter only requires evaluation of vmult() of H and is based on the recursion equation for Chebyshev polynomial of degree $n$: $T_{n}(x) = 2x T_{n-1}(x) - T_{n-2}(x)$ with $T_0(x)=1$ and $T_1(x)=x$.

    +

    By introducing a linear mapping $L$ from unwanted_spectrum to $[-1,1]$, we can dump the corresponding modes in x. The higher the polynomial degree $n$, the more rapid it grows outside of the $[-1,1]$. In order to avoid numerical overflow, we normalize polynomial filter to unity at tau. Thus, the filtered operator is $p(H) = T_n(L(H))/T_n(L(\tau))$.

    +

    The action of the Chebyshev filter only requires evaluation of vmult() of H and is based on the recursion equation for Chebyshev polynomial of degree $n$: $T_{n}(x) = 2x T_{n-1}(x) - T_{n-2}(x)$ with $T_0(x)=1$ and $T_1(x)=x$.

    vector_memory is used to allocate memory for temporary objects.

    -

    This function implements the algorithm (with a minor fix of sign of $\sigma_1$) from

    @article{Zhou2014,
    +

    This function implements the algorithm (with a minor fix of sign of $\sigma_1$) from

    @article{Zhou2014,
    Title = {Chebyshev-filtered subspace iteration method free of sparse
    diagonalization for solving the Kohn--Sham equation},
    Author = {Zhou, Yunkai and Chelikowsky, James R and Saad, Yousef},
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html differs (JavaScript source, ASCII text, with very long lines (1407)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 2023-11-25 15:26:00.966534857 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 2023-11-25 15:26:00.966534857 +0100 @@ -135,7 +135,7 @@

    Detailed Description

    A namespace for algorithms that implement the task of communicating in a dynamic-sparse way. In computer science, this is often called a consensus problem.

    -

    The problem consensus algorithms are trying to solve is this: Let's say you have $P$ processes that work together via MPI. Each (or at least some) of these want to send information to some of the other processes, or request information from other processes. No process knows which other process wants to communicate with them. The challenge is to determine who needs to talk to whom and what information needs to be sent, and to come up with an algorithm that ensures that this communication happens.

    +

    The problem consensus algorithms are trying to solve is this: Let's say you have $P$ processes that work together via MPI. Each (or at least some) of these want to send information to some of the other processes, or request information from other processes. No process knows which other process wants to communicate with them. The challenge is to determine who needs to talk to whom and what information needs to be sent, and to come up with an algorithm that ensures that this communication happens.

    That this is not a trivial problem can be seen by an analogy of the postal service. There, some senders may request information from some other participants in the postal service. So they send a letter that requests the information, but the recipients do not know how many such letters they need to expect (or that they should expect any at all). They also do not know how long they need to keep checking their mailbox for incoming requests. The recipients can be considered reliable, however: We can assume that everyone who is sent a request puts a letter with the answer in the mail. This time at least the recipients of these answers know that they are waiting for these answers because they have previously sent a request. They do not know in advance, however, when the answer will arrive and how long to wait. The goal of a consensus algorithm is then to come up with a strategy in which every participant can say who they want to send requests to, what that request is, and is then guaranteed an answer. The algorithm will only return when all requests by all participants have been answered and the answer delivered to the requesters.

    The problem is generally posed in terms of requests and answers. In practice, either of these two may be empty messages. For example, processes may simply want to send information to others that they know these others need; in this case, the "answer" message may be empty and its meaning is simply an affirmation that the information was received. Similarly, in some cases processes simply need to inform others that they want information, but the destination process knows what information is being requested (based on where in the program the request happens) and can send that information without there be any identifying information in the request; in that case, the request message may be empty and simply serve to identify the requester. (Each message can be queried for its sender.)

    As mentioned in the first paragraph, the algorithms we are interested in are "dynamic-sparse":

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceVectorTools.html differs (JavaScript source, ASCII text, with very long lines (1577)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceVectorTools.html 2023-11-25 15:26:01.016533842 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceVectorTools.html 2023-11-25 15:26:01.016533842 +0100 @@ -321,7 +321,7 @@

      Detailed Description

      Provide a namespace which offers some operations on vectors. Among these are assembling of standard vectors, integration of the difference of a finite element solution and a continuous function, interpolations and projections of continuous functions to the finite element space and other operations.

      -
      Note
      There exist two versions of almost all functions, one that takes an explicit Mapping argument and one that does not. The second one generally calls the first with an implicit $Q_1$ argument (i.e., with an argument of kind MappingQ(1)). If your intend your code to use a different mapping than a (bi-/tri-)linear one, then you need to call the functions with mapping argument should be used.
      +
      Note
      There exist two versions of almost all functions, one that takes an explicit Mapping argument and one that does not. The second one generally calls the first with an implicit $Q_1$ argument (i.e., with an argument of kind MappingQ(1)). If your intend your code to use a different mapping than a (bi-/tri-)linear one, then you need to call the functions with mapping argument should be used.

      Description of operations

      This collection of methods offers the following operations:

      • @@ -330,7 +330,7 @@

      • -

        Projection: compute the L2-projection of the given function onto the finite element space, i.e. if f is the function to be projected, compute fh in Vh such that (fh,vh)=(f,vh) for all discrete test functions vh. This is done through the solution of the linear system of equations M v = f where M is the mass matrix $m_{ij} = \int_\Omega \phi_i(x) \phi_j(x) dx$ and $f_i = \int_\Omega f(x) \phi_i(x) dx$. The solution vector $v$ then is the nodal representation of the projection fh. The project() functions are used in the step-21 and step-23 tutorial programs.

        +

        Projection: compute the L2-projection of the given function onto the finite element space, i.e. if f is the function to be projected, compute fh in Vh such that (fh,vh)=(f,vh) for all discrete test functions vh. This is done through the solution of the linear system of equations M v = f where M is the mass matrix $m_{ij} = \int_\Omega \phi_i(x) \phi_j(x) dx$ and $f_i = \int_\Omega f(x) \phi_i(x) dx$. The solution vector $v$ then is the nodal representation of the projection fh. The project() functions are used in the step-21 and step-23 tutorial programs.

        In order to get proper results, it be may necessary to treat boundary conditions right. Below are listed some cases where this may be needed. If needed, this is done by L2-projection of the trace of the given function onto the finite element space restricted to the boundary of the domain, then taking this information and using it to eliminate the boundary nodes from the mass matrix of the whole domain, using the MatrixTools::apply_boundary_values() function. The projection of the trace of the function to the boundary is done with the VectorTools::project_boundary_values() (see below) function, which is called with a map of boundary functions std::map<types::boundary_id, const Function<spacedim,number>*> in which all boundary indicators from zero to numbers::internal_face_boundary_id-1 (numbers::internal_face_boundary_id is used for other purposes, see the Triangulation class documentation) point to the function to be projected. The projection to the boundary takes place using a second quadrature formula on the boundary given to the project() function. The first quadrature formula is used to compute the right hand side and for numerical quadrature of the mass matrix.

        The projection of the boundary values first, then eliminating them from the global system of equations is not needed usually. It may be necessary if you want to enforce special restrictions on the boundary values of the projected function, for example in time dependent problems: you may want to project the initial values but need consistency with the boundary values for later times. Since the latter are projected onto the boundary in each time step, it is necessary that we also project the boundary values of the initial values, before projecting them to the whole domain.

        Obviously, the results of the two schemes for projection are different. Usually, when projecting to the boundary first, the L2-norm of the difference between original function and projection over the whole domain will be larger (factors of five have been observed) while the L2-norm of the error integrated over the boundary should of course be less. The reverse should also hold if no projection to the boundary is performed.

        @@ -340,17 +340,17 @@

      • -

        Creation of right hand side vectors: The create_right_hand_side() function computes the vector $f_i = \int_\Omega f(x) \phi_i(x) dx$. This is the same as what the MatrixCreator::create_* functions which take a right hand side do, but without assembling a matrix.

        +

        Creation of right hand side vectors: The create_right_hand_side() function computes the vector $f_i = \int_\Omega f(x) \phi_i(x) dx$. This is the same as what the MatrixCreator::create_* functions which take a right hand side do, but without assembling a matrix.

      • -

        Creation of right hand side vectors for point sources: The create_point_source_vector() function computes the vector $F_i =
-\int_\Omega \delta(x-x_0) \phi_i(x) dx$.

        +

        Creation of right hand side vectors for point sources: The create_point_source_vector() function computes the vector $F_i =
+\int_\Omega \delta(x-x_0) \phi_i(x) dx$.

      • -

        Creation of boundary right hand side vectors: The create_boundary_right_hand_side() function computes the vector $f_i =
-\int_{\partial\Omega} g(x) \phi_i(x) dx$. This is the right hand side contribution of boundary forces when having inhomogeneous Neumann boundary values in Laplace's equation or other second order operators. This function also takes an optional argument denoting over which parts of the boundary the integration shall extend. If the default argument is used, it is applied to all boundaries.

        +

        Creation of boundary right hand side vectors: The create_boundary_right_hand_side() function computes the vector $f_i =
+\int_{\partial\Omega} g(x) \phi_i(x) dx$. This is the right hand side contribution of boundary forces when having inhomogeneous Neumann boundary values in Laplace's equation or other second order operators. This function also takes an optional argument denoting over which parts of the boundary the integration shall extend. If the default argument is used, it is applied to all boundaries.

      • @@ -393,220 +393,220 @@
      -

      Denote which norm/integral is to be computed by the integrate_difference() function on each cell and compute_global_error() for the whole domain. Let $f:\Omega \rightarrow \mathbb{R}^c$ be a finite element function with $c$ components where component $c$ is denoted by $f_c$ and $\hat{f}$ be the reference function (the fe_function and exact_solution arguments to integrate_difference()). Let $e_c = \hat{f}_c - f_c$ be the difference or error between the two. Further, let $w:\Omega \rightarrow \mathbb{R}^c$ be the weight function of integrate_difference(), which is assumed to be equal to one if not supplied. Finally, let $p$ be the exponent argument (for $L_p$-norms).

      -

      In the following,we denote by $E_K$ the local error computed by integrate_difference() on cell $K$, whereas $E$ is the global error computed by compute_global_error(). Note that integrals are approximated by quadrature in the usual way:

      -\[
+<p>Denote which norm/integral is to be computed by the <a class=integrate_difference() function on each cell and compute_global_error() for the whole domain. Let $f:\Omega \rightarrow \mathbb{R}^c$ be a finite element function with $c$ components where component $c$ is denoted by $f_c$ and $\hat{f}$ be the reference function (the fe_function and exact_solution arguments to integrate_difference()). Let $e_c = \hat{f}_c - f_c$ be the difference or error between the two. Further, let $w:\Omega \rightarrow \mathbb{R}^c$ be the weight function of integrate_difference(), which is assumed to be equal to one if not supplied. Finally, let $p$ be the exponent argument (for $L_p$-norms).

      +

      In the following,we denote by $E_K$ the local error computed by integrate_difference() on cell $K$, whereas $E$ is the global error computed by compute_global_error(). Note that integrals are approximated by quadrature in the usual way:

      +\[
 \int_A f(x) dx \approx \sum_q f(x_q) \omega_q.
-\] +\]" src="form_2308.png"/>

      -

      Similarly for suprema over a cell $T$:

      -\[
+<p> Similarly for suprema over a cell <picture><source srcset=$T$:

      +\[
 \sup_{x\in T} |f(x)| dx \approx \max_q |f(x_q)|.
-\] +\]" src="form_2309.png"/>

      -
      Enumerator
      mean 

      The function or difference of functions is integrated on each cell $K$:

      -\[
+<picture><source srcset=\[
   E_K
 = \int_K \sum_c (\hat{f}_c - f_c) \, w_c
 = \int_K \sum_c e_c \, w_c
-\] +\]" src="form_2310.png"/>

      and summed up to get

      -\[
+<picture><source srcset=\[
   E = \sum_K E_K
     = \int_\Omega \sum_c (\hat{f}_c - f_c) \, w_c
-\] +\]" src="form_2311.png"/>

      -

      or, for $w \equiv 1$:

      -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

      +\[
   E = \int_\Omega (\hat{f} - f)
     = \int_\Omega e.
-\] +\]" src="form_2313.png"/>

      -

      Note: This differs from what is typically known as the mean of a function by a factor of $\frac{1}{|\Omega|}$. To compute the mean you can also use compute_mean_value(). Finally, pay attention to the sign: if $\hat{f}=0$, this will compute the negative of the mean of $f$.

      +

      Note: This differs from what is typically known as the mean of a function by a factor of $\frac{1}{|\Omega|}$. To compute the mean you can also use compute_mean_value(). Finally, pay attention to the sign: if $\hat{f}=0$, this will compute the negative of the mean of $f$.

      L1_norm 

      The absolute value of the function is integrated:

      -\[
+<picture><source srcset=\[
   E_K = \int_K \sum_c |e_c| \, w_c
-\] +\]" src="form_2316.png"/>

      and

      -\[
+<picture><source srcset=\[
   E = \sum_K E_K = \int_\Omega \sum_c |e_c| w_c,
-\] +\]" src="form_2317.png"/>

      -

      or, for $w \equiv 1$:

      -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

      +\[
   E  = \| e \|_{L^1}.
-\] +\]" src="form_2318.png"/>

      L2_norm 

      The square of the function is integrated and the square root of the result is computed on each cell:

      -\[
+<picture><source srcset=\[
   E_K = \sqrt{ \int_K \sum_c e_c^2 \, w_c }
-\] +\]" src="form_2319.png"/>

      and

      -\[
+<picture><source srcset=\[
   E = \sqrt{\sum_K E_K^2} = \sqrt{ \int_\Omega  \sum_c e_c^2 \, w_c }
-\] +\]" src="form_2320.png"/>

      -

      or, for $w \equiv 1$:

      -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

      +\[
   E = \sqrt{ \int_\Omega e^2 }
     = \| e \|_{L^2}
-\] +\]" src="form_2321.png"/>

      Lp_norm 

      The absolute value to the $p$-th power is integrated and the $p$-th root is computed on each cell. The exponent $p$ is the exponent argument of integrate_difference() and compute_mean_value():

      -\[
+<tr><td class=Lp_norm 

      The absolute value to the $p$-th power is integrated and the $p$-th root is computed on each cell. The exponent $p$ is the exponent argument of integrate_difference() and compute_mean_value():

      +\[
   E_K = \left( \int_K \sum_c |e_c|^p \, w_c \right)^{1/p}
-\] +\]" src="form_2322.png"/>

      and

      -\[
+<picture><source srcset=\[
   E = \left( \sum_K E_K^p \right)^{1/p}
-\] +\]" src="form_2323.png"/>

      -

      or, for $w \equiv 1$:

      -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

      +\[
   E = \| e \|_{L^p}.
-\] +\]" src="form_2324.png"/>

      Linfty_norm 

      The maximum absolute value of the function:

      -\[
+<picture><source srcset=\[
   E_K = \sup_K \max_c |e_c| \, w_c
-\] +\]" src="form_2325.png"/>

      and

      -\[
+<picture><source srcset=\[
   E = \max_K E_K
 = \sup_\Omega \max_c |e_c| \, w_c
-\] +\]" src="form_2326.png"/>

      -

      or, for $w \equiv 1$:

      -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

      +\[
   E  = \sup_\Omega \|e\|_\infty = \| e \|_{L^\infty}.
-\] +\]" src="form_2327.png"/>

      H1_seminorm 

      L2_norm of the gradient:

      -\[
+<picture><source srcset=\[
   E_K = \sqrt{ \int_K \sum_c (\nabla e_c)^2 \, w_c }
-\] +\]" src="form_2328.png"/>

      and

      -\[
+<picture><source srcset=\[
/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacehp_1_1Refinement.html differs (JavaScript source, ASCII text, with very long lines (1719))
--- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacehp_1_1Refinement.html	2023-11-25 15:26:01.033200170 +0100
+++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespacehp_1_1Refinement.html	2023-11-25 15:26:01.033200170 +0100
@@ -581,9 +581,9 @@
         </tr>
       </table>
 </div><div class= -

      Predict how the current error_indicators will adapt after refinement and coarsening were to happen on the provided dof_handler, and write its results to predicted_errors. Each entry of error_indicators and predicted_errors corresponds to an active cell on the underlying Triangulation, thus each container has to be of size Triangulation::n_active_cells(). The errors are interpreted to be measured in the energy norm; this assumption enters the rate of convergence that is used in the prediction. The $l_2$-norm of the output argument predicted_errors corresponds to the predicted global error after adaptation.

      +

      Predict how the current error_indicators will adapt after refinement and coarsening were to happen on the provided dof_handler, and write its results to predicted_errors. Each entry of error_indicators and predicted_errors corresponds to an active cell on the underlying Triangulation, thus each container has to be of size Triangulation::n_active_cells(). The errors are interpreted to be measured in the energy norm; this assumption enters the rate of convergence that is used in the prediction. The $l_2$-norm of the output argument predicted_errors corresponds to the predicted global error after adaptation.

      For p-adaptation, the local error is expected to converge exponentially with the polynomial degree of the assigned finite element. Each increase or decrease of the degree will thus change its value by a user-defined control parameter gamma_p.

      -

      For h-adaptation, we expect the local error $\eta_K$ on cell $K$ to be proportional to $(h_K)^{p_K}$ in the energy norm, where $h_K$ denotes the cell diameter and $p_K$ the polynomial degree of the currently assigned finite element on cell $K$.

      +

      For h-adaptation, we expect the local error $\eta_K$ on cell $K$ to be proportional to $(h_K)^{p_K}$ in the energy norm, where $h_K$ denotes the cell diameter and $p_K$ the polynomial degree of the currently assigned finite element on cell $K$.

      During h-coarsening, the finite elements on siblings may be different, and their parent cell will be assigned to their least dominating finite element that belongs to its most general child. Thus, we will always interpolate on an enclosing finite element space. Additionally assuming that the finite elements on the cells to be coarsened are sufficient to represent the solution correctly (e.g. at least quadratic basis functions for a quadratic solution), we are confident to say that the error will not change by sole interpolation on the larger finite element space.

      For p-adaptation, the local error is expected to converge exponentially with the polynomial degree of the assigned finite element. Each increase or decrease of the degree will thus change its value by a user-defined control parameter gamma_p. The assumption of exponential convergence is only valid if both h- and p-adaptive methods are combined in a sense that they are both utilized throughout a mesh, but do not have to be applied both on a cell simultaneously.

      The prediction algorithm is formulated as follows with control parameters gamma_p, gamma_h and gamma_n that may be used to influence prediction for each adaptation type individually. The results for each individual cell are stored in the predicted_errors output argument.

      @@ -605,7 +605,7 @@ \gamma_\text{p}^{(p_{K,\text{future}} - p_{K})}$" src="form_1500.png"/>

      On basis of the refinement history, we use the predicted error estimates to decide how cells will be adapted in the next adaptation step. Comparing the predicted error from the previous adaptation step to the error estimates of the current step allows us to justify whether our previous choice of adaptation was justified, and lets us decide how to adapt in the next one.

      -

      We thus have to transfer the predicted error from the old to the adapted mesh. When transferring the predicted error to the adapted mesh, make sure to configure your CellDataTransfer object with AdaptationStrategies::Refinement::l2_norm() as a refinement strategy and AdaptationStrategies::Coarsening::l2_norm() as a coarsening strategy. This ensures that the $l_2$-norm of the predict errors is preserved on both meshes.

      +

      We thus have to transfer the predicted error from the old to the adapted mesh. When transferring the predicted error to the adapted mesh, make sure to configure your CellDataTransfer object with AdaptationStrategies::Refinement::l2_norm() as a refinement strategy and AdaptationStrategies::Coarsening::l2_norm() as a coarsening strategy. This ensures that the $l_2$-norm of the predict errors is preserved on both meshes.

      In this context, we assume that the local error on a cell to be h-refined will be divided equally on all of its $n_{K_c}$ children, whereas local errors on siblings will be summed up on the parent cell in case of h-coarsening. This assumption is often not satisfied in practice: For example, if a cell is at a corner singularity, then the one child cell that ends up closest to the singularity will inherit the majority of the remaining error – but this function can not know where the singularity will be, and consequently assumes equal distribution.

      Incorporating the transfer from the old to the adapted mesh, the complete error prediction algorithm reads as follows:

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceinternal.html differs (JavaScript source, ASCII text, with very long lines (831)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceinternal.html 2023-11-25 15:26:01.079865887 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceinternal.html 2023-11-25 15:26:01.079865887 +0100 @@ -857,8 +857,8 @@

      Creates a (dim + 1)-dimensional point by copying over the coordinates of the incoming dim-dimensional point and setting the "missing" (dim + 1)-dimensional component to the incoming coordinate value.

      -

      For example, given the input $\{(x, y), 2, z \}$ this function creates the point $(x, y, z)$.

      -

      The coordinates of the dim-dimensional point are written to the coordinates of the (dim + 1)-dimensional point in the order of the convention given by the function coordinate_to_one_dim_higher. Thus, the order of coordinates on the lower-dimensional point are not preserved: $\{(z, x), 1, y \}$ creates the point $(x, y, z)$.

      +

      For example, given the input $\{(x, y), 2, z \}$ this function creates the point $(x, y, z)$.

      +

      The coordinates of the dim-dimensional point are written to the coordinates of the (dim + 1)-dimensional point in the order of the convention given by the function coordinate_to_one_dim_higher. Thus, the order of coordinates on the lower-dimensional point are not preserved: $\{(z, x), 1, y \}$ creates the point $(x, y, z)$.

      Definition at line 24 of file function_restriction.cc.

      @@ -1959,7 +1959,7 @@
      -

      Compute the polynomial interpolation of a tensor product shape function $\varphi_i$ given a vector of coefficients $u_i$ in the form $u_h(\mathbf{x}) = \sum_{i=1}^{k^d} \varphi_i(\mathbf{x}) u_i$. The shape functions $\varphi_i(\mathbf{x}) =
+<p>Compute the polynomial interpolation of a tensor product shape function <picture><source srcset=$\varphi_i$ given a vector of coefficients $u_i$ in the form $u_h(\mathbf{x}) = \sum_{i=1}^{k^d} \varphi_i(\mathbf{x}) u_i$. The shape functions $\varphi_i(\mathbf{x}) =
 \prod_{d=1}^{\text{dim}}\varphi_{i_d}^\text{1d}(x_d)$ represent a tensor product. The function returns a pair with the value of the interpolation as the first component and the gradient in reference coordinates as the second component. Note that for compound types (e.g. the values field begin a Point<spacedim> argument), the components of the gradient are sorted as Tensor<1, dim, Tensor<1, spacedim>> with the derivatives as the first index; this is a consequence of the generic arguments in the function.

      Parameters
      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceparallel.html differs (JavaScript source, ASCII text, with very long lines (1089)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceparallel.html 2023-11-25 15:26:01.093198949 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/namespaceparallel.html 2023-11-25 15:26:01.093198949 +0100 @@ -356,7 +356,7 @@

      This function applies the given function argument f to all elements in the range [begin,end) and may do so in parallel. An example of its use is given in step-69.

      However, in many cases it is not efficient to call a function on each element, so this function calls the given function object on sub-ranges. In other words: if the given range [begin,end) is smaller than grainsize or if multithreading is not enabled, then we call f(begin,end); otherwise, we may execute, possibly in parallel, a sequence of calls f(b,e) where [b,e) are subintervals of [begin,end) and the collection of calls we do to f(.,.) will happen on disjoint subintervals that collectively cover the original interval [begin,end).

      -

      Oftentimes, the called function will of course have to get additional information, such as the object to work on for a given value of the iterator argument. This can be achieved by binding certain arguments. For example, here is an implementation of a matrix-vector multiplication $y=Ax$ for a full matrix $A$ and vectors $x,y$:

      void matrix_vector_product (const FullMatrix &A,
      +

      Oftentimes, the called function will of course have to get additional information, such as the object to work on for a given value of the iterator argument. This can be achieved by binding certain arguments. For example, here is an implementation of a matrix-vector multiplication $y=Ax$ for a full matrix $A$ and vectors $x,y$:

      void matrix_vector_product (const FullMatrix &A,
      const Vector &x,
      Vector &y)
      {
      @@ -433,7 +433,7 @@

      This function works a lot like the apply_to_subranges() function, but it allows to accumulate numerical results computed on each subrange into one number. The type of this number is given by the ResultType template argument that needs to be explicitly specified.

      -

      An example of use of this function is to compute the value of the expression $x^T A x$ for a square matrix $A$ and a vector $x$. The sum over rows can be parallelized and the whole code might look like this:

      void matrix_norm (const FullMatrix &A,
      +

      An example of use of this function is to compute the value of the expression $x^T A x$ for a square matrix $A$ and a vector $x$. The sum over rows can be parallelized and the whole code might look like this:

      void matrix_norm (const FullMatrix &A,
      const Vector &x)
      {
      return
      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_1.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (2096)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_1.html 2023-11-25 15:26:01.106532009 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_1.html 2023-11-25 15:26:01.106532009 +0100 @@ -300,7 +300,7 @@

      This program obviously does not have a whole lot of functionality, but in particular the second_grid function has a bunch of places where you can play with it. For example, you could modify the criterion by which we decide which cells to refine. An example would be to change the condition to this:

      for (auto &cell: triangulation.active_cell_iterators())
      if (cell->center()[1] > 0)
      cell->set_refine_flag ();
      -

      This would refine all cells for which the $y$-coordinate of the cell's center is greater than zero (the TriaAccessor::center function that we call by dereferencing the cell iterator returns a Point<2> object; subscripting [0] would give the $x$-coordinate, subscripting [1] the $y$-coordinate). By looking at the functions that TriaAccessor provides, you can also use more complicated criteria for refinement.

      +

      This would refine all cells for which the $y$-coordinate of the cell's center is greater than zero (the TriaAccessor::center function that we call by dereferencing the cell iterator returns a Point<2> object; subscripting [0] would give the $x$-coordinate, subscripting [1] the $y$-coordinate). By looking at the functions that TriaAccessor provides, you can also use more complicated criteria for refinement.

      In general, what you can do with operations of the form cell->something() is a bit difficult to find in the documentation because cell is not a pointer but an iterator. The functions you can call on a cell can be found in the documentation of the classes TriaAccessor (which has functions that can also be called on faces of cells or, more generally, all sorts of geometric objects that appear in a triangulation), and CellAccessor (which adds a few functions that are specific to cells).

      A more thorough description of the whole iterator concept can be found in the Iterators on mesh-like containers documentation module.

      Different geometries

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_10.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1782)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_10.html 2023-11-25 15:26:01.126531602 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_10.html 2023-11-25 15:26:01.126531602 +0100 @@ -108,8 +108,8 @@
    • The plain program

      Introduction

      -

      This is a rather short example which only shows some aspects of using higher order mappings. By mapping we mean the transformation between the unit cell (i.e. the unit line, square, or cube) to the cells in real space. In all the previous examples, we have implicitly used linear or d-linear mappings; you will not have noticed this at all, since this is what happens if you do not do anything special. However, if your domain has curved boundaries, there are cases where the piecewise linear approximation of the boundary (i.e. by straight line segments) is not sufficient, and you want that your computational domain is an approximation to the real domain using curved boundaries as well. If the boundary approximation uses piecewise quadratic parabolas to approximate the true boundary, then we say that this is a quadratic or $Q_2$ approximation. If we use piecewise graphs of cubic polynomials, then this is a $Q_3$ approximation, and so on.

      -

      For some differential equations, it is known that piecewise linear approximations of the boundary, i.e. $Q_1$ mappings, are not sufficient if the boundary of the exact domain is curved. Examples are the biharmonic equation using $C^1$ elements, or the Euler equations of gas dynamics on domains with curved reflective boundaries. In these cases, it is necessary to compute the integrals using a higher order mapping. If we do not use such a higher order mapping, the order of approximation of the boundary dominates the order of convergence of the entire numerical scheme, irrespective of the order of convergence of the discretization in the interior of the domain.

      +

      This is a rather short example which only shows some aspects of using higher order mappings. By mapping we mean the transformation between the unit cell (i.e. the unit line, square, or cube) to the cells in real space. In all the previous examples, we have implicitly used linear or d-linear mappings; you will not have noticed this at all, since this is what happens if you do not do anything special. However, if your domain has curved boundaries, there are cases where the piecewise linear approximation of the boundary (i.e. by straight line segments) is not sufficient, and you want that your computational domain is an approximation to the real domain using curved boundaries as well. If the boundary approximation uses piecewise quadratic parabolas to approximate the true boundary, then we say that this is a quadratic or $Q_2$ approximation. If we use piecewise graphs of cubic polynomials, then this is a $Q_3$ approximation, and so on.

      +

      For some differential equations, it is known that piecewise linear approximations of the boundary, i.e. $Q_1$ mappings, are not sufficient if the boundary of the exact domain is curved. Examples are the biharmonic equation using $C^1$ elements, or the Euler equations of gas dynamics on domains with curved reflective boundaries. In these cases, it is necessary to compute the integrals using a higher order mapping. If we do not use such a higher order mapping, the order of approximation of the boundary dominates the order of convergence of the entire numerical scheme, irrespective of the order of convergence of the discretization in the interior of the domain.

      Rather than demonstrating the use of higher order mappings with one of these more complicated examples, we do only a brief computation: calculating the value of $\pi=3.141592653589793238462643\ldots$ by two different methods.

      The first method uses a triangulated approximation of the circle with unit radius and integrates a unit magnitude constant function ( $f = 1$) over it. Of course, if the domain were the exact unit circle, then the area would be $\pi$, but since we only use an approximation by piecewise polynomial segments, the value of the area we integrate over is not exactly $\pi$. However, it is known that as we refine the triangulation, a $Q_p$ mapping approximates the boundary with an order $h^{p+1}$, where $h$ is the mesh size. We will check the values of the computed area of the circle and their convergence towards $\pi$ under mesh refinement for different mappings. We will also find a convergence behavior that is surprising at first, but has a good explanation.

      The second method works similarly, but this time does not use the area of the triangulated unit circle, but rather its perimeter. $\pi$ is then approximated by half of the perimeter, as we choose the radius equal to one.

      @@ -155,7 +155,7 @@
      void hyper_ball(Triangulation< dim > &tria, const Point< dim > &center=Point< dim >(), const double radius=1., const bool attach_spherical_manifold_on_boundary_cells=false)
      const ::parallel::distributed::Triangulation< dim, spacedim > * triangulation
      -
    • Then alternate between generating output on the current mesh for $Q_1$, $Q_2$, and $Q_3$ mappings, and (at the end of the loop body) refining the mesh once globally.

      +

      Then alternate between generating output on the current mesh for $Q_1$, $Q_2$, and $Q_3$ mappings, and (at the end of the loop body) refining the mesh once globally.

        for (unsigned int refinement = 0; refinement < 2; ++refinement)
        {
        std::cout << "Refinement level: " << refinement << std::endl;
      @@ -194,7 +194,7 @@
       

      Now we proceed with the main part of the code, the approximation of $\pi$. The area of a circle is of course given by $\pi r^2$, so having a circle of radius 1, the area represents just the number that is searched for. The numerical computation of the area is performed by integrating the constant function of value 1 over the whole computational domain, i.e. by computing the areas $\int_K 1 dx=\int_{\hat K} 1
    \ \textrm{det}\ J(\hat x) d\hat x \approx \sum_i \textrm{det}
-   \ J(\hat x_i)w(\hat x_i)$, where the sum extends over all quadrature points on all active cells in the triangulation, with $w(x_i)$ being the weight of quadrature point $x_i$. The integrals on each cell are approximated by numerical quadrature, hence the only additional ingredient we need is to set up a FEValues object that provides the corresponding JxW values of each cell. (Note that JxW is meant to abbreviate Jacobian determinant times weight; since in numerical quadrature the two factors always occur at the same places, we only offer the combined quantity, rather than two separate ones.) We note that here we won't use the FEValues object in its original purpose, i.e. for the computation of values of basis functions of a specific finite element at certain quadrature points. Rather, we use it only to gain the JxW at the quadrature points, irrespective of the (dummy) finite element we will give to the constructor of the FEValues object. The actual finite element given to the FEValues object is not used at all, so we could give any.

      + \ J(\hat x_i)w(\hat x_i)$" src="form_2751.png"/>, where the sum extends over all quadrature points on all active cells in the triangulation, with $w(x_i)$ being the weight of quadrature point $x_i$. The integrals on each cell are approximated by numerical quadrature, hence the only additional ingredient we need is to set up a FEValues object that provides the corresponding JxW values of each cell. (Note that JxW is meant to abbreviate Jacobian determinant times weight; since in numerical quadrature the two factors always occur at the same places, we only offer the combined quantity, rather than two separate ones.) We note that here we won't use the FEValues object in its original purpose, i.e. for the computation of values of basis functions of a specific finite element at certain quadrature points. Rather, we use it only to gain the JxW at the quadrature points, irrespective of the (dummy) finite element we will give to the constructor of the FEValues object. The actual finite element given to the FEValues object is not used at all, so we could give any.

        template <int dim>
        void compute_pi_by_area()
        {
      @@ -402,11 +402,11 @@
      unset ytics
      plot [-1:1][-1:1] "ball_0_mapping_q_1.dat" lw 4 lt rgb "black"

      or using one of the other filenames. The second line makes sure that the aspect ratio of the generated output is actually 1:1, i.e. a circle is drawn as a circle on your screen, rather than as an ellipse. The third line switches off the key in the graphic, as that will only print information (the filename) which is not that important right now. Similarly, the fourth and fifth disable tick marks. The plot is then generated with a specific line width ("lw", here set to 4) and line type ("lt", here chosen by saying that the line should be drawn using the RGB color "black").

      -

      The following table shows the triangulated computational domain for $Q_1$, $Q_2$, and $Q_3$ mappings, for the original coarse grid (left), and a once uniformly refined grid (right).

      +

      The following table shows the triangulated computational domain for $Q_1$, $Q_2$, and $Q_3$ mappings, for the original coarse grid (left), and a once uniformly refined grid (right).

      Five-cell discretization of the disk.
      20-cell discretization of the disk (i.e., five cells
               refined once).
      Five-cell discretization of the disk with quadratic edges. The
               boundary is nearly indistinguishable from the actual circle.
      20-cell discretization with quadratic edges.
      Five-cell discretization of the disk with cubic edges. The
-              boundary is nearly indistinguishable from the actual circle.
      20-cell discretization with cubic edges.

      These pictures show the obvious advantage of higher order mappings: they approximate the true boundary quite well also on rather coarse meshes. To demonstrate this a little further, here is part of the upper right quarter circle of the coarse meshes with $Q_2$ and $Q_3$ mappings, where the dashed red line marks the actual circle:

      + boundary is nearly indistinguishable from the actual circle." style="pointer-events: none;" width="400" height="400" class="inline"/>
    20-cell discretization with cubic edges.

    These pictures show the obvious advantage of higher order mappings: they approximate the true boundary quite well also on rather coarse meshes. To demonstrate this a little further, here is part of the upper right quarter circle of the coarse meshes with $Q_2$ and $Q_3$ mappings, where the dashed red line marks the actual circle:

    Close-up of quadratic discretization. The distance between the
          quadratic interpolant and the actual circle is small.
    Close-up of cubic discretization. The distance between the
          cubic interpolant and the actual circle is very small.

    Obviously the quadratic mapping approximates the boundary quite well, while for the cubic mapping the difference between approximated domain and true one is hardly visible already for the coarse grid. You can also see that the mapping only changes something at the outer boundaries of the triangulation. In the interior, all lines are still represented by linear functions, resulting in additional computations only on cells at the boundary. Higher order mappings are therefore usually not noticeably slower than lower order ones, because the additional computations are only performed on a small subset of all cells.

    @@ -499,7 +499,7 @@
    5120 3.1415926535897940 8.8818e-16 2.00
    unsigned int level
    Definition: grid_out.cc:4618
    Note
    Once the error reaches a level on the order of $10^{-13}$ to $10^{-15}$, it is essentially dominated by round-off and consequently dominated by what precisely the library is doing in internal computations. Since these things change, the precise values and errors change from release to release at these round-off levels, though the overall order of errors should of course remain the same. See also the comment below in the section on Possibilities for extensions about how to compute these results more accurately.
    -

    One of the immediate observations from the output above is that in all cases the values converge quickly to the true value of $\pi=3.141592653589793238462643$. Note that for the $Q_4$ mapping, we are already in the regime of roundoff errors and the convergence rate levels off, which is already quite a lot. However, also note that for the $Q_1$ mapping, even on the finest grid the accuracy is significantly worse than on the coarse grid for a $Q_3$ mapping!

    +

    One of the immediate observations from the output above is that in all cases the values converge quickly to the true value of $\pi=3.141592653589793238462643$. Note that for the $Q_4$ mapping, we are already in the regime of roundoff errors and the convergence rate levels off, which is already quite a lot. However, also note that for the $Q_1$ mapping, even on the finest grid the accuracy is significantly worse than on the coarse grid for a $Q_3$ mapping!

    The last column of the output shows the convergence order, in powers of the mesh width $h$. In the introduction, we had stated that the convergence order for a $Q_p$ mapping should be $h^{p+1}$. However, in the example shown, the order is rather $h^{2p}$! This at first surprising fact is explained by the properties of the $Q_p$ mapping. At order p, it uses support points that are based on the p+1 point Gauss-Lobatto quadrature rule that selects the support points in such a way that the quadrature rule converges at order 2p. Even though these points are here only used for interpolation of a pth order polynomial, we get a superconvergence effect when numerically evaluating the integral, resulting in the observed high order of convergence. (This effect is also discussed in detail in the following publication: A. Bonito, A. Demlow, and J. Owen: "A priori error estimates for finite element approximations to eigenvalues and eigenfunctions of the Laplace-Beltrami operator", submitted, 2018.)

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_11.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1225)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_11.html 2023-11-25 15:26:01.143197931 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_11.html 2023-11-25 15:26:01.143197931 +0100 @@ -119,15 +119,15 @@ \int_\Omega f\; dx + \int_{\partial\Omega} g\; ds = 0. \]" src="form_2761.png"/>

    -

    We will consider the special case that $\Omega$ is the circle of radius 1 around the origin, and $f=-2$, $g=1$. This choice satisfies the compatibility condition.

    +

    We will consider the special case that $\Omega$ is the circle of radius 1 around the origin, and $f=-2$, $g=1$. This choice satisfies the compatibility condition.

    The compatibility condition allows a solution of the above equation, but it nevertheless retains an ambiguity: since only derivatives of the solution appear in the equations, the solution is only determined up to a constant. For this reason, we have to pose another condition for the numerical solution, which fixes this constant.

    For this, there are various possibilities:

    1. -

      Fix one node of the discretization to zero or any other fixed value. This amounts to an additional condition $u_h(x_0)=0$. Although this is common practice, it is not necessarily a good idea, since we know that the solutions of Laplace's equation are only in $H^1$, which does not allow for the definition of point values because it is not a subset of the continuous functions. Therefore, even though fixing one node is allowed for discretized functions, it is not for continuous functions, and one can often see this in a resulting error spike at this point in the numerical solution.

      +

      Fix one node of the discretization to zero or any other fixed value. This amounts to an additional condition $u_h(x_0)=0$. Although this is common practice, it is not necessarily a good idea, since we know that the solutions of Laplace's equation are only in $H^1$, which does not allow for the definition of point values because it is not a subset of the continuous functions. Therefore, even though fixing one node is allowed for discretized functions, it is not for continuous functions, and one can often see this in a resulting error spike at this point in the numerical solution.

    2. -

      Fixing the mean value over the domain to zero or any other value. This is allowed on the continuous level, since $H^1(\Omega)\subset L^1(\Omega)$ by Sobolev's inequality, and thus also on the discrete level since we there only consider subsets of $H^1$.

      +

      Fixing the mean value over the domain to zero or any other value. This is allowed on the continuous level, since $H^1(\Omega)\subset L^1(\Omega)$ by Sobolev's inequality, and thus also on the discrete level since we there only consider subsets of $H^1$.

    3. @@ -155,8 +155,8 @@ CU = b \]" src="form_2771.png"/>

      -

      where $C$ denotes a matrix, $b$ denotes a vector, and $U$ the vector of nodal values. In this case, since $C$ represents one homogeneous constraint, $b$ is the zero vector.

      -

      In this example, the mean value along the boundary allows just such a representation, with $C$ being a matrix with just one row (i.e. there is only one constraint). In the implementation, we will create an AffineConstraints object, add one constraint (i.e. add another row to the matrix) referring to the first boundary node $i_0$, and insert the weights with which all the other nodes contribute, which in this example happens to be just $-1$.

      +

      where $C$ denotes a matrix, $b$ denotes a vector, and $U$ the vector of nodal values. In this case, since $C$ represents one homogeneous constraint, $b$ is the zero vector.

      +

      In this example, the mean value along the boundary allows just such a representation, with $C$ being a matrix with just one row (i.e. there is only one constraint). In the implementation, we will create an AffineConstraints object, add one constraint (i.e. add another row to the matrix) referring to the first boundary node $i_0$, and insert the weights with which all the other nodes contribute, which in this example happens to be just $-1$.

      Later, we will use this object to eliminate the first boundary node from the linear system of equations, reducing it to one which has a solution without the ambiguity of the constant shift value. One of the problems of the implementation will be that the explicit elimination of this node results in a number of additional elements in the matrix, of which we do not know in advance where they are located and how many additional entries will be in each of the rows of the matrix. We will show how we can use an intermediate object to work around this problem.

      But now on to the implementation of the program solving this problem...

      The commented program

      @@ -322,8 +322,8 @@
      ::VectorizedArray< Number, width > max(const ::VectorizedArray< Number, width > &, const ::VectorizedArray< Number, width > &)

    That's quite simple, right?

    Two remarks are in order, though: First, these functions are used in a lot of contexts. Maybe you want to create a Laplace or mass matrix for a vector values finite element; or you want to use the default Q1 mapping; or you want to assembled the matrix with a coefficient in the Laplace operator. For this reason, there are quite a large number of variants of these functions in the MatrixCreator and MatrixTools namespaces. Whenever you need a slightly different version of these functions than the ones called above, it is certainly worthwhile to take a look at the documentation and to check whether something fits your needs.

    -

    The second remark concerns the quadrature formula we use: we want to integrate over bilinear shape functions, so we know that we have to use at least an order two Gauss quadrature formula. On the other hand, we want the quadrature rule to have at least the order of the boundary approximation. Since the order of Gauss rule with $r$ points is $2r -
-   1$, and the order of the boundary approximation using polynomials of degree $p$ is $p+1$, we know that $2r \geq p$. Since r has to be an integer and (as mentioned above) has to be at least $2$, this makes up for the formula above computing gauss_degree.

    +

    The second remark concerns the quadrature formula we use: we want to integrate over bilinear shape functions, so we know that we have to use at least an order two Gauss quadrature formula. On the other hand, we want the quadrature rule to have at least the order of the boundary approximation. Since the order of Gauss rule with $r$ points is $2r -
+   1$, and the order of the boundary approximation using polynomials of degree $p$ is $p+1$, we know that $2r \geq p$. Since r has to be an integer and (as mentioned above) has to be at least $2$, this makes up for the formula above computing gauss_degree.

    Since the generation of the body force contributions to the right hand side vector was so simple, we do that all over again for the boundary forces as well: allocate a vector of the right size and call the right function. The boundary function has constant values, so we can generate an object from the library on the fly, and we use the same quadrature formula as above, but this time of lower dimension since we integrate over faces now instead of cells:

      Vector<double> tmp(system_rhs.size());
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_12.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1075)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_12.html 2023-11-25 15:26:01.166530788 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_12.html 2023-11-25 15:26:01.166530788 +0100 @@ -143,21 +143,21 @@ u=g\quad\mbox{on }\Gamma_-, \]" src="form_2775.png"/>

    -

    on the inflow part $\Gamma_-$ of the boundary $\Gamma=\partial\Omega$ of the domain. Here, ${\mathbf \beta}={\mathbf \beta}({\bf x})$ denotes a vector field, $u$ the (scalar) solution function, $g$ a boundary value function,

    +

    on the inflow part $\Gamma_-$ of the boundary $\Gamma=\partial\Omega$ of the domain. Here, ${\mathbf \beta}={\mathbf \beta}({\bf x})$ denotes a vector field, $u$ the (scalar) solution function, $g$ a boundary value function,

    \[
 \Gamma_- \dealcoloneq \{{\bf x}\in\Gamma, {\mathbf \beta}({\bf x})\cdot{\bf n}({\bf x})<0\}
 \]

    the inflow part of the boundary of the domain and ${\bf n}$ denotes the unit outward normal to the boundary $\Gamma$. This equation is the conservative version of the advection equation already considered in step-9 of this tutorial.

    -

    On each cell $T$, we multiply by a test function $v_h$ from the left and integrate by parts to get:

    +

    On each cell $T$, we multiply by a test function $v_h$ from the left and integrate by parts to get:

    \[
   \left( v_h, \nabla \cdot (\beta u_h) \right)_T
 = -(\nabla v_h, \beta u_h) + \int_{\partial T} v_h u_h \beta \cdot n
 \]

    -

    When summing this expression over all cells $T$, the boundary integral is done over all internal and external faces and as such there are three cases:

      +

      When summing this expression over all cells $T$, the boundary integral is done over all internal and external faces and as such there are three cases:

      1. -outer boundary on the inflow (we replace $u_h$ by given $g$): $\int_{\Gamma_-} v_h g \beta \cdot n$
      2. +outer boundary on the inflow (we replace $u_h$ by given $g$): $\int_{\Gamma_-} v_h g \beta \cdot n$
      3. outer boundary on the outflow: $\int_{\Gamma_+} v_h u_h \beta \cdot n$
      4. @@ -621,7 +621,7 @@
          }
         
         
        -

    We refine the grid according to a very simple refinement criterion, namely an approximation to the gradient of the solution. As here we consider the DG(1) method (i.e. we use piecewise bilinear shape functions) we could simply compute the gradients on each cell. But we do not want to base our refinement indicator on the gradients on each cell only, but want to base them also on jumps of the discontinuous solution function over faces between neighboring cells. The simplest way of doing that is to compute approximative gradients by difference quotients including the cell under consideration and its neighbors. This is done by the DerivativeApproximation class that computes the approximate gradients in a way similar to the GradientEstimation described in step-9 of this tutorial. In fact, the DerivativeApproximation class was developed following the GradientEstimation class of step-9. Relating to the discussion in step-9, here we consider $h^{1+d/2}|\nabla_h u_h|$. Furthermore we note that we do not consider approximate second derivatives because solutions to the linear advection equation are in general not in $H^2$ but only in $H^1$ (or, to be more precise: in $H^1_\beta$, i.e., the space of functions whose derivatives in direction $\beta$ are square integrable).

    +

    We refine the grid according to a very simple refinement criterion, namely an approximation to the gradient of the solution. As here we consider the DG(1) method (i.e. we use piecewise bilinear shape functions) we could simply compute the gradients on each cell. But we do not want to base our refinement indicator on the gradients on each cell only, but want to base them also on jumps of the discontinuous solution function over faces between neighboring cells. The simplest way of doing that is to compute approximative gradients by difference quotients including the cell under consideration and its neighbors. This is done by the DerivativeApproximation class that computes the approximate gradients in a way similar to the GradientEstimation described in step-9 of this tutorial. In fact, the DerivativeApproximation class was developed following the GradientEstimation class of step-9. Relating to the discussion in step-9, here we consider $h^{1+d/2}|\nabla_h u_h|$. Furthermore we note that we do not consider approximate second derivatives because solutions to the linear advection equation are in general not in $H^2$ but only in $H^1$ (or, to be more precise: in $H^1_\beta$, i.e., the space of functions whose derivatives in direction $\beta$ are square integrable).

      template <int dim>
      void AdvectionProblem<dim>::refine_grid()
      {
    @@ -821,8 +821,8 @@

    In refinement iteration 5, the image can't be plotted in a reasonable way any more as a 3d plot. We thus show a color plot with a range of $[-1,2]$ (the solution values of the exact solution lie in $[0,1]$, of course). In any case, it is clear that the continuous Galerkin solution exhibits oscillatory behavior that gets worse and worse as the mesh is refined more and more.

    There are a number of strategies to stabilize the cG method, if one wants to use continuous elements for some reason. Discussing these methods is beyond the scope of this tutorial program; an interested reader could, for example, take a look at step-31.

    Possibilities for extensions

    -

    Given that the exact solution is known in this case, one interesting avenue for further extensions would be to confirm the order of convergence for this program. In the current case, the solution is non-smooth, and so we can not expect to get a particularly high order of convergence, even if we used higher order elements. But even if the solution is smooth, the equation is not elliptic and so it is not immediately clear that we should obtain a convergence order that equals that of the optimal interpolation estimates (i.e. for example that we would get $h^3$ convergence in the $L^2$ norm by using quadratic elements).

    -

    In fact, for hyperbolic equations, theoretical predictions often indicate that the best one can hope for is an order one half below the interpolation estimate. For example, for the streamline diffusion method (an alternative method to the DG method used here to stabilize the solution of the transport equation), one can prove that for elements of degree $p$, the order of convergence is $p+\frac 12$ on arbitrary meshes. While the observed order is frequently $p+1$ on uniformly refined meshes, one can construct so-called Peterson meshes on which the worse theoretical bound is actually attained. This should be relatively simple to verify, for example using the VectorTools::integrate_difference function.

    +

    Given that the exact solution is known in this case, one interesting avenue for further extensions would be to confirm the order of convergence for this program. In the current case, the solution is non-smooth, and so we can not expect to get a particularly high order of convergence, even if we used higher order elements. But even if the solution is smooth, the equation is not elliptic and so it is not immediately clear that we should obtain a convergence order that equals that of the optimal interpolation estimates (i.e. for example that we would get $h^3$ convergence in the $L^2$ norm by using quadratic elements).

    +

    In fact, for hyperbolic equations, theoretical predictions often indicate that the best one can hope for is an order one half below the interpolation estimate. For example, for the streamline diffusion method (an alternative method to the DG method used here to stabilize the solution of the transport equation), one can prove that for elements of degree $p$, the order of convergence is $p+\frac 12$ on arbitrary meshes. While the observed order is frequently $p+1$ on uniformly refined meshes, one can construct so-called Peterson meshes on which the worse theoretical bound is actually attained. This should be relatively simple to verify, for example using the VectorTools::integrate_difference function.

    A different direction is to observe that the solution of transport problems often has discontinuities and that therefore a mesh in which we bisect every cell in every coordinate direction may not be optimal. Rather, a better strategy would be to only cut cells in the direction parallel to the discontinuity. This is called anisotropic mesh refinement and is the subject of step-30.

    The plain program

    /* ---------------------------------------------------------------------
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_12b.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (2317)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_12b.html 2023-11-25 15:26:01.189863648 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_12b.html 2023-11-25 15:26:01.189863648 +0100 @@ -521,7 +521,7 @@
      }
     
     
    -

    We refine the grid according to a very simple refinement criterion, namely an approximation to the gradient of the solution. As here we consider the DG(1) method (i.e. we use piecewise bilinear shape functions) we could simply compute the gradients on each cell. But we do not want to base our refinement indicator on the gradients on each cell only, but want to base them also on jumps of the discontinuous solution function over faces between neighboring cells. The simplest way of doing that is to compute approximative gradients by difference quotients including the cell under consideration and its neighbors. This is done by the DerivativeApproximation class that computes the approximate gradients in a way similar to the GradientEstimation described in step-9 of this tutorial. In fact, the DerivativeApproximation class was developed following the GradientEstimation class of step-9. Relating to the discussion in step-9, here we consider $h^{1+d/2}|\nabla_h u_h|$. Furthermore we note that we do not consider approximate second derivatives because solutions to the linear advection equation are in general not in $H^2$ but only in $H^1$ (or, to be more precise: in $H^1_\beta$, i.e., the space of functions whose derivatives in direction $\beta$ are square integrable).

    +

    We refine the grid according to a very simple refinement criterion, namely an approximation to the gradient of the solution. As here we consider the DG(1) method (i.e. we use piecewise bilinear shape functions) we could simply compute the gradients on each cell. But we do not want to base our refinement indicator on the gradients on each cell only, but want to base them also on jumps of the discontinuous solution function over faces between neighboring cells. The simplest way of doing that is to compute approximative gradients by difference quotients including the cell under consideration and its neighbors. This is done by the DerivativeApproximation class that computes the approximate gradients in a way similar to the GradientEstimation described in step-9 of this tutorial. In fact, the DerivativeApproximation class was developed following the GradientEstimation class of step-9. Relating to the discussion in step-9, here we consider $h^{1+d/2}|\nabla_h u_h|$. Furthermore we note that we do not consider approximate second derivatives because solutions to the linear advection equation are in general not in $H^2$ but only in $H^1$ (or, to be more precise: in $H^1_\beta$, i.e., the space of functions whose derivatives in direction $\beta$ are square integrable).

      template <int dim>
      void AdvectionProblem<dim>::refine_grid()
      {
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_13.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1374)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_13.html 2023-11-25 15:26:01.219863038 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_13.html 2023-11-25 15:26:01.219863038 +0100 @@ -921,7 +921,7 @@
    void refine_and_coarsen_fixed_number(Triangulation< dim, spacedim > &triangulation, const Vector< Number > &criteria, const double top_fraction_of_cells, const double bottom_fraction_of_cells, const unsigned int max_n_cells=std::numeric_limits< unsigned int >::max())

    Equation data

    As this is one more academic example, we'd like to compare exact and computed solution against each other. For this, we need to declare function classes representing the exact solution (for comparison and for the Dirichlet boundary values), as well as a class that denotes the right hand side of the equation (this is simply the Laplace operator applied to the exact solution we'd like to recover).

    -

    For this example, let us choose as exact solution the function $u(x,y)=exp(x+sin(10y+5x^2))$. In more than two dimensions, simply repeat the sine-factor with y replaced by z and so on. Given this, the following two classes are probably straightforward from the previous examples.

    +

    For this example, let us choose as exact solution the function $u(x,y)=exp(x+sin(10y+5x^2))$. In more than two dimensions, simply repeat the sine-factor with y replaced by z and so on. Given this, the following two classes are probably straightforward from the previous examples.

      template <int dim>
      class Solution : public Function<dim>
      {
    @@ -1170,9 +1170,9 @@

    While we're already at watching pictures, this is the eighth grid, as viewed from top:

    -

    However, we are not yet finished with evaluation the point value computation. In fact, plotting the error $e=|u(x_0)-u_h(x_0)|$ for the two refinement criteria yields the following picture:

    +

    However, we are not yet finished with evaluation the point value computation. In fact, plotting the error $e=|u(x_0)-u_h(x_0)|$ for the two refinement criteria yields the following picture:

    -

    What is disturbing about this picture is that not only is the adaptive mesh refinement not better than global refinement as one would usually expect, it is even significantly worse since its convergence is irregular, preventing all extrapolation techniques when using the values of subsequent meshes! On the other hand, global refinement provides a perfect $1/N$ or $h^{-2}$ convergence history and provides every opportunity to even improve on the point values by extrapolation. Global mesh refinement must therefore be considered superior in this example! This is even more surprising as the evaluation point is not somewhere in the left part where the mesh is coarse, but rather to the right and the adaptive refinement should refine the mesh around the evaluation point as well.

    +

    What is disturbing about this picture is that not only is the adaptive mesh refinement not better than global refinement as one would usually expect, it is even significantly worse since its convergence is irregular, preventing all extrapolation techniques when using the values of subsequent meshes! On the other hand, global refinement provides a perfect $1/N$ or $h^{-2}$ convergence history and provides every opportunity to even improve on the point values by extrapolation. Global mesh refinement must therefore be considered superior in this example! This is even more surprising as the evaluation point is not somewhere in the left part where the mesh is coarse, but rather to the right and the adaptive refinement should refine the mesh around the evaluation point as well.

    We thus close the discussion of this example program with a question:

    What is wrong with adaptivity if it is not better than global refinement?

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_14.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (2605)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_14.html 2023-11-25 15:26:01.279861816 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_14.html 2023-11-25 15:26:01.279861816 +0100 @@ -161,30 +161,30 @@

    The Heidelberg group of Professor Rolf Rannacher, to which the three initial authors of the deal.II library belonged during their PhD time and partly also afterwards, has been involved with adaptivity and error estimation for finite element discretizations since the mid-1990ies. The main achievement is the development of error estimates for arbitrary functionals of the solution, and of optimal mesh refinement for its computation.

    We will not discuss the derivation of these concepts in too great detail, but will implement the main ideas in the present example program. For a thorough introduction into the general idea, we refer to the seminal work of Becker and Rannacher [BR95], [BR96r], and the overview article of the same authors in Acta Numerica [BR01]; the first introduces the concept of error estimation and adaptivity for general functional output for the Laplace equation, while the second gives many examples of applications of these concepts to a large number of other, more complicated equations. For applications to individual types of equations, see also the publications by Becker [Bec95], [Bec98], Kanschat [Kan96], [FK97], Suttmeier [Sut96], [RS97], [RS98c], [RS99], Bangerth [BR99b], [Ban00w], [BR01a], [Ban02], and Hartmann [Har02], [HH01], [HH01b]. All of these works, from the original introduction by Becker and Rannacher to individual contributions to particular equations, have later been summarized in a book by Bangerth and Rannacher that covers all of these topics, see [BR03].

    The basic idea is the following: in applications, one is not usually interested in the solution per se, but rather in certain aspects of it. For example, in simulations of flow problems, one may want to know the lift or drag of a body immersed in the fluid; it is this quantity that we want to know to best accuracy, and whether the rest of the solution of the describing equations is well resolved is not of primary interest. Likewise, in elasticity one might want to know about values of the stress at certain points to guess whether maximal load values of joints are safe, for example. Or, in radiative transfer problems, mean flux intensities are of interest.

    -

    In all the cases just listed, it is the evaluation of a functional $J(u)$ of the solution which we are interested in, rather than the values of $u$ everywhere. Since the exact solution $u$ is not available, but only its numerical approximation $u_h$, it is sensible to ask whether the computed value $J(u_h)$ is within certain limits of the exact value $J(u)$, i.e. we want to bound the error with respect to this functional, $J(u)-J(u_h)$.

    -

    For simplicity of exposition, we henceforth assume that both the quantity of interest $J$ as well as the equation are linear, and we will in particular show the derivation for the Laplace equation with homogeneous Dirichlet boundary conditions, although the concept is much more general. For this general case, we refer to the references listed above. The goal is to obtain bounds on the error, $J(e)=J(u)-J(u_h)$. For this, let us denote by $z$ the solution of a dual problem, defined as follows:

    -\[
+<p>In all the cases just listed, it is the evaluation of a functional <picture><source srcset=$J(u)$ of the solution which we are interested in, rather than the values of $u$ everywhere. Since the exact solution $u$ is not available, but only its numerical approximation $u_h$, it is sensible to ask whether the computed value $J(u_h)$ is within certain limits of the exact value $J(u)$, i.e. we want to bound the error with respect to this functional, $J(u)-J(u_h)$.

    +

    For simplicity of exposition, we henceforth assume that both the quantity of interest $J$ as well as the equation are linear, and we will in particular show the derivation for the Laplace equation with homogeneous Dirichlet boundary conditions, although the concept is much more general. For this general case, we refer to the references listed above. The goal is to obtain bounds on the error, $J(e)=J(u)-J(u_h)$. For this, let us denote by $z$ the solution of a dual problem, defined as follows:

    +\[
   a(\varphi,z) = J(\varphi) \qquad \forall \varphi,
-\] +\]" src="form_2820.png"/>

    -

    where $a(\cdot,\cdot)$ is the bilinear form associated with the differential equation, and the test functions are chosen from the corresponding solution space. Then, taking as special test function $\varphi=e$ the error, we have that

    -\[
+<p> where <picture><source srcset=$a(\cdot,\cdot)$ is the bilinear form associated with the differential equation, and the test functions are chosen from the corresponding solution space. Then, taking as special test function $\varphi=e$ the error, we have that

    +\[
   J(e) = a(e,z)
-\] +\]" src="form_2823.png"/>

    and we can, by Galerkin orthogonality, rewrite this as

    -\[
+<picture><source srcset=\[
   J(e) = a(e,z-\varphi_h)
-\] +\]" src="form_2824.png"/>

    -

    where $\varphi_h$ can be chosen from the discrete test space in whatever way we find convenient.

    +

    where $\varphi_h$ can be chosen from the discrete test space in whatever way we find convenient.

    Concretely, for Laplace's equation, the error identity reads

    -\[
+<picture><source srcset=\[
   J(e) = (\nabla e, \nabla(z-\varphi_h)).
-\] +\]" src="form_2826.png"/>

    Because we want to use this formula not only to compute error, but also to refine the mesh, we need to rewrite the expression above as a sum over cells where each cell's contribution can then be used as an error indicator for this cell. Thus, we split the scalar products into terms for each cell, and integrate by parts on each of them:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (\nabla (u-u_h), \nabla (z-\varphi_h))_K
@@ -192,54 +192,54 @@
   &=&
   \sum_K (-\Delta (u-u_h), z-\varphi_h)_K
   + (\partial_n (u-u_h), z-z_h)_{\partial K}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2827.png"/>

    -

    Next we use that $-\Delta u=f$, and that the solution of the Laplace equation is smooth enough that $\partial_n u$ is continuous almost everywhere – so the terms involving $\partial_n u$ on one cell cancels with that on its neighbor, where the normal vector has the opposite sign. (The same is not true for $\partial_n u_h$, though.) At the boundary of the domain, where there is no neighbor cell with which this term could cancel, the weight $z-\varphi_h$ can be chosen as zero, and the whole term disappears.

    +

    Next we use that $-\Delta u=f$, and that the solution of the Laplace equation is smooth enough that $\partial_n u$ is continuous almost everywhere – so the terms involving $\partial_n u$ on one cell cancels with that on its neighbor, where the normal vector has the opposite sign. (The same is not true for $\partial_n u_h$, though.) At the boundary of the domain, where there is no neighbor cell with which this term could cancel, the weight $z-\varphi_h$ can be chosen as zero, and the whole term disappears.

    Thus, we have

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (f+\Delta u_h, z-\varphi_h)_K
   - (\partial_n u_h, z-\varphi_h)_{\partial K\backslash \partial\Omega}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2832.png"/>

    -

    In a final step, note that when taking the normal derivative of $u_h$, we mean the value of this quantity as taken from this side of the cell (for the usual Lagrange elements, derivatives are not continuous across edges). We then rewrite the above formula by exchanging half of the edge integral of cell $K$ with the neighbor cell $K'$, to obtain

    -\begin{eqnarray*}
+<p> In a final step, note that when taking the normal derivative of <picture><source srcset=$u_h$, we mean the value of this quantity as taken from this side of the cell (for the usual Lagrange elements, derivatives are not continuous across edges). We then rewrite the above formula by exchanging half of the edge integral of cell $K$ with the neighbor cell $K'$, to obtain

    +\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (f+\Delta u_h, z-\varphi_h)_K
   - \frac 12 (\partial_n u_h|_K + \partial_{n'} u_h|_{K'},
               z-\varphi_h)_{\partial K\backslash \partial\Omega}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2833.png"/>

    -

    Using that for the normal vectors on adjacent cells we have $n'=-n$, we define the jump of the normal derivative by

    -\[
+<p> Using that for the normal vectors on adjacent cells we have <picture><source srcset=$n'=-n$, we define the jump of the normal derivative by

    +\[
   [\partial_n u_h] \dealcoloneq \partial_n u_h|_K + \partial_{n'} u_h|_{K'}
   =
   \partial_n u_h|_K - \partial_n u_h|_{K'},
-\] +\]" src="form_2835.png"/>

    -

    and get the final form after setting the discrete function $\varphi_h$, which is by now still arbitrary, to the point interpolation of the dual solution, $\varphi_h=I_h z$:

    -\begin{eqnarray*}
+<p> and get the final form after setting the discrete function <picture><source srcset=$\varphi_h$, which is by now still arbitrary, to the point interpolation of the dual solution, $\varphi_h=I_h z$:

    +\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (f+\Delta u_h, z-I_h z)_K
   - \frac 12 ([\partial_n u_h],
               z-I_h z)_{\partial K\backslash \partial\Omega}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2837.png"/>

    -

    With this, we have obtained an exact representation of the error of the finite element discretization with respect to arbitrary (linear) functionals $J(\cdot)$. Its structure is a weighted form of a residual estimator, as both $f+\Delta u_h$ and $[\partial_n u_h]$ are cell and edge residuals that vanish on the exact solution, and $z-I_h z$ are weights indicating how important the residual on a certain cell is for the evaluation of the given functional. Furthermore, it is a cell-wise quantity, so we can use it as a mesh refinement criterion. The question is: how to evaluate it? After all, the evaluation requires knowledge of the dual solution $z$, which carries the information about the quantity we want to know to best accuracy.

    -

    In some, very special cases, this dual solution is known. For example, if the functional $J(\cdot)$ is the point evaluation, $J(\varphi)=\varphi(x_0)$, then the dual solution has to satisfy

    -\[
+<p>With this, we have obtained an exact representation of the error of the finite element discretization with respect to arbitrary (linear) functionals <picture><source srcset=$J(\cdot)$. Its structure is a weighted form of a residual estimator, as both $f+\Delta u_h$ and $[\partial_n u_h]$ are cell and edge residuals that vanish on the exact solution, and $z-I_h z$ are weights indicating how important the residual on a certain cell is for the evaluation of the given functional. Furthermore, it is a cell-wise quantity, so we can use it as a mesh refinement criterion. The question is: how to evaluate it? After all, the evaluation requires knowledge of the dual solution $z$, which carries the information about the quantity we want to know to best accuracy.

    +

    In some, very special cases, this dual solution is known. For example, if the functional $J(\cdot)$ is the point evaluation, $J(\varphi)=\varphi(x_0)$, then the dual solution has to satisfy

    +\[
   -\Delta z = \delta(x-x_0),
-\] +\]" src="form_2843.png"/>

    -

    with the Dirac delta function on the right hand side, and the dual solution is the Green's function with respect to the point $x_0$. For simple geometries, this function is analytically known, and we could insert it into the error representation formula.

    -

    However, we do not want to restrict ourselves to such special cases. Rather, we will compute the dual solution numerically, and approximate $z$ by some numerically obtained $\tilde z$. We note that it is not sufficient to compute this approximation $\tilde z$ using the same method as used for the primal solution $u_h$, since then $\tilde z-I_h \tilde z=0$, and the overall error estimate would be zero. Rather, the approximation $\tilde z$ has to be from a larger space than the primal finite element space. There are various ways to obtain such an approximation (see the cited literature), and we will choose to compute it with a higher order finite element space. While this is certainly not the most efficient way, it is simple since we already have all we need to do that in place, and it also allows for simple experimenting. For more efficient methods, again refer to the given literature, in particular [BR95], [BR03].

    +

    with the Dirac delta function on the right hand side, and the dual solution is the Green's function with respect to the point $x_0$. For simple geometries, this function is analytically known, and we could insert it into the error representation formula.

    +

    However, we do not want to restrict ourselves to such special cases. Rather, we will compute the dual solution numerically, and approximate $z$ by some numerically obtained $\tilde z$. We note that it is not sufficient to compute this approximation $\tilde z$ using the same method as used for the primal solution $u_h$, since then $\tilde z-I_h \tilde z=0$, and the overall error estimate would be zero. Rather, the approximation $\tilde z$ has to be from a larger space than the primal finite element space. There are various ways to obtain such an approximation (see the cited literature), and we will choose to compute it with a higher order finite element space. While this is certainly not the most efficient way, it is simple since we already have all we need to do that in place, and it also allows for simple experimenting. For more efficient methods, again refer to the given literature, in particular [BR95], [BR03].

    With this, we end the discussion of the mathematical side of this program and turn to the actual implementation.

    -
    Note
    There are two steps above that do not seem necessary if all you care about is computing the error: namely, (i) the subtraction of $\phi_h$ from $z$, and (ii) splitting the integral into a sum of cells and integrating by parts on each. Indeed, neither of these two steps change $J(e)$ at all, as we only ever consider identities above until the substitution of $z$ by $\tilde z$. In other words, if you care only about estimating the global error $J(e)$, then these steps are not necessary. On the other hand, if you want to use the error estimate also as a refinement criterion for each cell of the mesh, then it is necessary to (i) break the estimate into a sum of cells, and (ii) massage the formulas in such a way that each cell's contributions have something to do with the local error. (While the contortions above do not change the value of the sum $J(e)$, they change the values we compute for each cell $K$.) To this end, we want to write everything in the form "residual times dual weight" where a "residual" is something that goes to zero as the approximation becomes $u_h$ better and better. For example, the quantity $\partial_n
-u_h$ is not a residual, since it simply converges to the (normal component of) the gradient of the exact solution. On the other hand, $[\partial_n u_h]$ is a residual because it converges to $[\partial_n
-u]=0$. All of the steps we have taken above in developing the final form of $J(e)$ have indeed had the goal of bringing the final formula into a form where each term converges to zero as the discrete solution $u_h$ converges to $u$. This then allows considering each cell's contribution as an "error indicator" that also converges to zero – as it should as the mesh is refined.
    +
    Note
    There are two steps above that do not seem necessary if all you care about is computing the error: namely, (i) the subtraction of $\phi_h$ from $z$, and (ii) splitting the integral into a sum of cells and integrating by parts on each. Indeed, neither of these two steps change $J(e)$ at all, as we only ever consider identities above until the substitution of $z$ by $\tilde z$. In other words, if you care only about estimating the global error $J(e)$, then these steps are not necessary. On the other hand, if you want to use the error estimate also as a refinement criterion for each cell of the mesh, then it is necessary to (i) break the estimate into a sum of cells, and (ii) massage the formulas in such a way that each cell's contributions have something to do with the local error. (While the contortions above do not change the value of the sum $J(e)$, they change the values we compute for each cell $K$.) To this end, we want to write everything in the form "residual times dual weight" where a "residual" is something that goes to zero as the approximation becomes $u_h$ better and better. For example, the quantity $\partial_n
+u_h$ is not a residual, since it simply converges to the (normal component of) the gradient of the exact solution. On the other hand, $[\partial_n u_h]$ is a residual because it converges to $[\partial_n
+u]=0$. All of the steps we have taken above in developing the final form of $J(e)$ have indeed had the goal of bringing the final formula into a form where each term converges to zero as the discrete solution $u_h$ converges to $u$. This then allows considering each cell's contribution as an "error indicator" that also converges to zero – as it should as the mesh is refined.

    The software

    The step-14 example program builds heavily on the techniques already used in the step-13 program. Its implementation of the dual weighted residual error estimator explained above is done by deriving a second class, properly called DualSolver, from the Solver base class, and having a class (WeightedResidual) that joins the two again and controls the solution of the primal and dual problem, and then uses both to compute the error indicator for mesh refinement.

    The program continues the modular concept of the previous example, by implementing the dual functional, describing quantity of interest, by an abstract base class, and providing two different functionals which implement this interface. Adding a different quantity of interest is thus simple.

    @@ -2572,15 +2572,15 @@

    Note the subtle interplay between resolving the corner singularities, and resolving around the point of evaluation. It will be rather difficult to generate such a mesh by hand, as this would involve to judge quantitatively how much which of the four corner singularities should be resolved, and to set the weight compared to the vicinity of the evaluation point.

    The program prints the point value and the estimated error in this quantity. From extrapolating it, we can guess that the exact value is somewhere close to 0.0334473, plus or minus 0.0000001 (note that we get almost 6 valid digits from only 22,000 (primal) degrees of freedom. This number cannot be obtained from the value of the functional alone, but I have used the assumption that the error estimator is mostly exact, and extrapolated the computed value plus the estimated error, to get an approximation of the true value. Computing with more degrees of freedom shows that this assumption is indeed valid.

    -

    From the computed results, we can generate two graphs: one that shows the convergence of the error $J(u)-J(u_h)$ (taking the extrapolated value as correct) in the point value, and the value that we get by adding up computed value $J(u_h)$ and estimated error eta (if the error estimator $eta$ were exact, then the value $J(u_h)+\eta$ would equal the exact point value, and the error in this quantity would always be zero; however, since the error estimator is only a - good - approximation to the true error, we can by this only reduce the size of the error). In this graph, we also indicate the complexity ${\cal O}(1/N)$ to show that mesh refinement acts optimal in this case. The second chart compares true and estimated error, and shows that the two are actually very close to each other, even for such a complicated quantity as the point value:

    +

    From the computed results, we can generate two graphs: one that shows the convergence of the error $J(u)-J(u_h)$ (taking the extrapolated value as correct) in the point value, and the value that we get by adding up computed value $J(u_h)$ and estimated error eta (if the error estimator $eta$ were exact, then the value $J(u_h)+\eta$ would equal the exact point value, and the error in this quantity would always be zero; however, since the error estimator is only a - good - approximation to the true error, we can by this only reduce the size of the error). In this graph, we also indicate the complexity ${\cal O}(1/N)$ to show that mesh refinement acts optimal in this case. The second chart compares true and estimated error, and shows that the two are actually very close to each other, even for such a complicated quantity as the point value:

    Comparing refinement criteria

    -

    Since we have accepted quite some effort when using the mesh refinement driven by the dual weighted error estimator (for solving the dual problem, and for evaluating the error representation), it is worth while asking whether that effort was successful. To this end, we first compare the achieved error levels for different mesh refinement criteria. To generate this data, simply change the value of the mesh refinement criterion variable in the main program. The results are thus (for the weight in the Kelly indicator, we have chosen the function $1/(r^2+0.1^2)$, where $r$ is the distance to the evaluation point; it can be shown that this is the optimal weight if we neglect the effects of boundaries):

    +

    Since we have accepted quite some effort when using the mesh refinement driven by the dual weighted error estimator (for solving the dual problem, and for evaluating the error representation), it is worth while asking whether that effort was successful. To this end, we first compare the achieved error levels for different mesh refinement criteria. To generate this data, simply change the value of the mesh refinement criterion variable in the main program. The results are thus (for the weight in the Kelly indicator, we have chosen the function $1/(r^2+0.1^2)$, where $r$ is the distance to the evaluation point; it can be shown that this is the optimal weight if we neglect the effects of boundaries):

    -

    Checking these numbers, we see that for global refinement, the error is proportional to $O(1/(sqrt(N) log(N)))$, and for the dual estimator $O(1/N)$. Generally speaking, we see that the dual weighted error estimator is better than the other refinement indicators, at least when compared with those that have a similarly regular behavior. The Kelly indicator produces smaller errors, but jumps about the picture rather irregularly, with the error also changing signs sometimes. Therefore, its behavior does not allow to extrapolate the results to larger values of N. Furthermore, if we trust the error estimates of the dual weighted error estimator, the results can be improved by adding the estimated error to the computed values. In terms of reliability, the weighted estimator is thus better than the Kelly indicator, although the latter sometimes produces smaller errors.

    +

    Checking these numbers, we see that for global refinement, the error is proportional to $O(1/(sqrt(N) log(N)))$, and for the dual estimator $O(1/N)$. Generally speaking, we see that the dual weighted error estimator is better than the other refinement indicators, at least when compared with those that have a similarly regular behavior. The Kelly indicator produces smaller errors, but jumps about the picture rather irregularly, with the error also changing signs sometimes. Therefore, its behavior does not allow to extrapolate the results to larger values of N. Furthermore, if we trust the error estimates of the dual weighted error estimator, the results can be improved by adding the estimated error to the computed values. In terms of reliability, the weighted estimator is thus better than the Kelly indicator, although the latter sometimes produces smaller errors.

    Evaluation of point stresses

    Besides evaluating the values of the solution at a certain point, the program also offers the possibility to evaluate the x-derivatives at a certain point, and also to tailor mesh refinement for this. To let the program compute these quantities, simply replace the two occurrences of PointValueEvaluation in the main function by PointXDerivativeEvaluation, and let the program run:

    Refinement cycle: 0
    Number of degrees of freedom=72
    @@ -2632,16 +2632,16 @@ -

    Note the asymmetry of the grids compared with those we obtained for the point evaluation. This is due to the fact that the domain and the primal solution may be symmetric about the diagonal, but the $x$-derivative is not, and the latter enters the refinement criterion.

    -

    Then, it is interesting to compare actually computed values of the quantity of interest (i.e. the x-derivative of the solution at one point) with a reference value of -0.0528223... plus or minus 0.0000005. We get this reference value by computing on finer grid after some more mesh refinements, with approximately 130,000 cells. Recall that if the error is $O(1/N)$ in the optimal case, then taking a mesh with ten times more cells gives us one additional digit in the result.

    +

    Note the asymmetry of the grids compared with those we obtained for the point evaluation. This is due to the fact that the domain and the primal solution may be symmetric about the diagonal, but the $x$-derivative is not, and the latter enters the refinement criterion.

    +

    Then, it is interesting to compare actually computed values of the quantity of interest (i.e. the x-derivative of the solution at one point) with a reference value of -0.0528223... plus or minus 0.0000005. We get this reference value by computing on finer grid after some more mesh refinements, with approximately 130,000 cells. Recall that if the error is $O(1/N)$ in the optimal case, then taking a mesh with ten times more cells gives us one additional digit in the result.

    In the left part of the following chart, you again see the convergence of the error towards this extrapolated value, while on the right you see a comparison of true and estimated error:

    -

    After an initial phase where the true error changes its sign, the estimated error matches it quite well, again. Also note the dramatic improvement in the error when using the estimated error to correct the computed value of $J(u_h)$.

    +

    After an initial phase where the true error changes its sign, the estimated error matches it quite well, again. Also note the dramatic improvement in the error when using the estimated error to correct the computed value of $J(u_h)$.

    step-13 revisited

    -

    If instead of the Exercise_2_3 data set, we choose CurvedRidges in the main function, and choose $(0.5,0.5)$ as the evaluation point, then we can redo the computations of the previous example program, to compare whether the results obtained with the help of the dual weighted error estimator are better than those we had previously.

    +

    If instead of the Exercise_2_3 data set, we choose CurvedRidges in the main function, and choose $(0.5,0.5)$ as the evaluation point, then we can redo the computations of the previous example program, to compare whether the results obtained with the help of the dual weighted error estimator are better than those we had previously.

    First, the meshes after 9 adaptive refinement cycles obtained with the point evaluation and derivative evaluation refinement criteria, respectively, look like this:

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_15.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (2533)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_15.html 2023-11-25 15:26:01.303194674 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_15.html 2023-11-25 15:26:01.303194674 +0100 @@ -142,41 +142,41 @@

    Introduction

    Foreword

    -

    This program deals with an example of a non-linear elliptic partial differential equation, the minimal surface equation. You can imagine the solution of this equation to describe the surface spanned by a soap film that is enclosed by a closed wire loop. We imagine the wire to not just be a planar loop, but in fact curved. The surface tension of the soap film will then reduce the surface to have minimal surface. The solution of the minimal surface equation describes this shape with the wire's vertical displacement as a boundary condition. For simplicity, we will here assume that the surface can be written as a graph $u=u(x,y)$ although it is clear that it is not very hard to construct cases where the wire is bent in such a way that the surface can only locally be constructed as a graph but not globally.

    +

    This program deals with an example of a non-linear elliptic partial differential equation, the minimal surface equation. You can imagine the solution of this equation to describe the surface spanned by a soap film that is enclosed by a closed wire loop. We imagine the wire to not just be a planar loop, but in fact curved. The surface tension of the soap film will then reduce the surface to have minimal surface. The solution of the minimal surface equation describes this shape with the wire's vertical displacement as a boundary condition. For simplicity, we will here assume that the surface can be written as a graph $u=u(x,y)$ although it is clear that it is not very hard to construct cases where the wire is bent in such a way that the surface can only locally be constructed as a graph but not globally.

    Because the equation is non-linear, we can't solve it directly. Rather, we have to use Newton's method to compute the solution iteratively.

    Note
    The material presented here is also discussed in video lecture 31.5, video lecture 31.55, video lecture 31.6. (All video lectures are also available here.) (See also video lecture 31.65, video lecture 31.7.)

    Classical formulation

    In a classical sense, the problem is given in the following form:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
     -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right) &= 0 \qquad
     \qquad &&\textrm{in} ~ \Omega
     \\
     u&=g \qquad\qquad &&\textrm{on} ~ \partial \Omega.
-  \end{align*} + \end{align*}" src="form_2858.png"/>

    -

    $\Omega$ is the domain we get by projecting the wire's positions into $x-y$ space. In this example, we choose $\Omega$ as the unit disk.

    -

    As described above, we solve this equation using Newton's method in which we compute the $n$th approximate solution from the $(n-1)$th one, and use a damping parameter $\alpha^n$ to get better global convergence behavior:

    -\begin{align*}
+<p><picture><source srcset=$\Omega$ is the domain we get by projecting the wire's positions into $x-y$ space. In this example, we choose $\Omega$ as the unit disk.

    +

    As described above, we solve this equation using Newton's method in which we compute the $n$th approximate solution from the $(n-1)$th one, and use a damping parameter $\alpha^n$ to get better global convergence behavior:

    +\begin{align*}
     F'(u^{n},\delta u^{n})&=- F(u^{n})
     \\
     u^{n+1}&=u^{n}+\alpha^n \delta u^{n}
-  \end{align*} + \end{align*}" src="form_2862.png"/>

    with

    -\[
+<picture><source srcset=\[
     F(u) \dealcoloneq -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right)
-  \] + \]" src="form_2863.png"/>

    -

    and $F'(u,\delta u)$ the derivative of F in direction of $\delta u$:

    -\[
+<p> and <picture><source srcset=$F'(u,\delta u)$ the derivative of F in direction of $\delta u$:

    +\[
   F'(u,\delta u)=\lim \limits_{\epsilon \rightarrow 0}{\frac{F(u+\epsilon \delta u)-
   F(u)}{\epsilon}}.
-\] +\]" src="form_2866.png"/>

    -

    Going through the motions to find out what $F'(u,\delta u)$ is, we find that we have to solve a linear elliptic PDE in every Newton step, with $\delta u^n$ as the solution of:

    +

    Going through the motions to find out what $F'(u,\delta u)$ is, we find that we have to solve a linear elliptic PDE in every Newton step, with $\delta u^n$ as the solution of:

    -\[
+<picture><source srcset=\[
   - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}\nabla
   \delta u^{n} \right) +
   \nabla \cdot \left( \frac{\nabla u^{n} \cdot
@@ -184,62 +184,62 @@
   \right)  =
   -\left( - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}
   \nabla u^{n} \right) \right)
-  \] + \]" src="form_2868.png"/>

    -

    In order to solve the minimal surface equation, we have to solve this equation repeatedly, once per Newton step. To solve this, we have to take a look at the boundary condition of this problem. Assuming that $u^{n}$ already has the right boundary values, the Newton update $\delta u^{n}$ should have zero boundary conditions, in order to have the right boundary condition after adding both. In the first Newton step, we are starting with the solution $u^{0}\equiv 0$, the Newton update still has to deliver the right boundary condition to the solution $u^{1}$.

    -

    Summing up, we have to solve the PDE above with the boundary condition $\delta
-u^{0}=g$ in the first step and with $\delta u^{n}=0$ in all the following steps.

    -
    Note
    In some sense, one may argue that if the program already implements $F(u)$, it is duplicative to also have to implement $F'(u,\delta)$. As always, duplication tempts bugs and we would like to avoid it. While we do not explore this issue in this program, we will come back to it at the end of the Possibilities for extensions section below, and specifically in step-72.
    +

    In order to solve the minimal surface equation, we have to solve this equation repeatedly, once per Newton step. To solve this, we have to take a look at the boundary condition of this problem. Assuming that $u^{n}$ already has the right boundary values, the Newton update $\delta u^{n}$ should have zero boundary conditions, in order to have the right boundary condition after adding both. In the first Newton step, we are starting with the solution $u^{0}\equiv 0$, the Newton update still has to deliver the right boundary condition to the solution $u^{1}$.

    +

    Summing up, we have to solve the PDE above with the boundary condition $\delta
+u^{0}=g$ in the first step and with $\delta u^{n}=0$ in all the following steps.

    +
    Note
    In some sense, one may argue that if the program already implements $F(u)$, it is duplicative to also have to implement $F'(u,\delta)$. As always, duplication tempts bugs and we would like to avoid it. While we do not explore this issue in this program, we will come back to it at the end of the Possibilities for extensions section below, and specifically in step-72.

    Weak formulation of the problem

    -

    Starting with the strong formulation above, we get the weak formulation by multiplying both sides of the PDE with a test function $\varphi$ and integrating by parts on both sides:

    -\[
+<p>Starting with the strong formulation above, we get the weak formulation by multiplying both sides of the PDE with a test function <picture><source srcset=$\varphi$ and integrating by parts on both sides:

    +\[
   \left( \nabla \varphi , \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}\nabla
   \delta u^{n} \right)-\left(\nabla \varphi ,\frac{\nabla u^{n} \cdot \nabla
   \delta u^{n}}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{3}{2}}}\nabla u^{n}  \right)
   = -\left(\nabla \varphi , \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}} \nabla u^{n}
    \right).
-  \] + \]" src="form_2876.png"/>

    -

    Here the solution $\delta u^{n}$ is a function in $H^{1}(\Omega)$, subject to the boundary conditions discussed above. Reducing this space to a finite dimensional space with basis $\left\{
-\varphi_{0},\dots , \varphi_{N-1}\right\}$, we can write the solution:

    +

    Here the solution $\delta u^{n}$ is a function in $H^{1}(\Omega)$, subject to the boundary conditions discussed above. Reducing this space to a finite dimensional space with basis $\left\{
+\varphi_{0},\dots , \varphi_{N-1}\right\}$, we can write the solution:

    -\[
+<picture><source srcset=\[
   \delta u^{n}=\sum_{j=0}^{N-1} \delta U_{j} \varphi_{j}.
-\] +\]" src="form_2879.png"/>

    -

    Using the basis functions as test functions and defining $a_{n} \dealcoloneq \frac{1}
-{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formulation:

    +

    Using the basis functions as test functions and defining $a_{n} \dealcoloneq \frac{1}
+{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formulation:

    -\[
+<picture><source srcset=\[
   \sum_{j=0}^{N-1}\left[ \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
   \left(\nabla u^{n}\cdot \nabla \varphi_{i} , a_{n}^{3} \nabla u^{n} \cdot \nabla
   \varphi_{j} \right) \right] \cdot \delta U_{j}=-\left( \nabla \varphi_{i} , a_{n}
   \nabla u^{n}\right) \qquad \forall i=0,\dots ,N-1,
-\] +\]" src="form_2881.png"/>

    -

    where the solution $\delta u^{n}$ is given by the coefficients $\delta U^{n}_{j}$. This linear system of equations can be rewritten as:

    +

    where the solution $\delta u^{n}$ is given by the coefficients $\delta U^{n}_{j}$. This linear system of equations can be rewritten as:

    -\[
+<picture><source srcset=\[
   A^{n}\; \delta U^{n}=b^{n},
-\] +\]" src="form_2883.png"/>

    -

    where the entries of the matrix $A^{n}$ are given by:

    +

    where the entries of the matrix $A^{n}$ are given by:

    -\[
+<picture><source srcset=\[
   A^{n}_{ij} \dealcoloneq \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
   \left(\nabla u^{n}\cdot \nabla \varphi_{i} , a_{n}^{3} \nabla u^{n} \cdot \nabla
   \varphi_{j} \right),
-\] +\]" src="form_2885.png"/>

    -

    and the right hand side $b^{n}$ is given by:

    +

    and the right hand side $b^{n}$ is given by:

    -\[
+<picture><source srcset=\[
   b^{n}_{i} \dealcoloneq -\left( \nabla \varphi_{i} , a_{n} \nabla u^{n}\right).
-\] +\]" src="form_2887.png"/>

    Questions about the appropriate solver

    The matrix that corresponds to the Newton step above can be reformulated to show its structure a bit better. Rewriting it slightly, we get that it has the form

    -\[
+<picture><source srcset=\[
   A_{ij}
   =
   \left(
@@ -247,10 +247,10 @@
     B
     \nabla \varphi_j
   \right),
-\] +\]" src="form_2888.png"/>

    -

    where the matrix $B$ (of size $d \times d$ in $d$ space dimensions) is given by the following expression:

    -\[
+<p> where the matrix <picture><source srcset=$B$ (of size $d \times d$ in $d$ space dimensions) is given by the following expression:

    +\[
   B
   =
   a_n \left\{
@@ -265,44 +265,44 @@
   \frac{\nabla u_n}{\sqrt{1+|\nabla u^{n}|^{2}}} \otimes
   \frac{\nabla u_n}{\sqrt{1+|\nabla u^{n}|^{2}}}
   \right\}.
-\] +\]" src="form_2890.png"/>

    -

    From this expression, it is obvious that $B$ is symmetric, and so $A$ is symmetric as well. On the other hand, $B$ is also positive definite, which confers the same property onto $A$. This can be seen by noting that the vector $v_1 =
-\frac{\nabla u^n}{|\nabla u^n|}$ is an eigenvector of $B$ with eigenvalue $\lambda_1=a_n \left(1-\frac{|\nabla u^n|^2}{1+|\nabla u^n|^2}\right) > 0$ while all vectors $v_2\ldots v_d$ that are perpendicular to $v_1$ and each other are eigenvectors with eigenvalue $a_n$. Since all eigenvalues are positive, $B$ is positive definite and so is $A$. We can thus use the CG method for solving the Newton steps. (The fact that the matrix $A$ is symmetric and positive definite should not come as a surprise. It results from taking the derivative of an operator that results from taking the derivative of an energy functional: the minimal surface equation simply minimizes some non-quadratic energy. Consequently, the Newton matrix, as the matrix of second derivatives of a scalar energy, must be symmetric since the derivative with regard to the $i$th and $j$th degree of freedom should clearly commute. Likewise, if the energy functional is convex, then the matrix of second derivatives must be positive definite, and the direct calculation above simply reaffirms this.)

    -

    It is worth noting, however, that the positive definiteness degenerates for problems where $\nabla u$ becomes large. In other words, if we simply multiply all boundary values by 2, then to first order $u$ and $\nabla u$ will also be multiplied by two, but as a consequence the smallest eigenvalue of $B$ will become smaller and the matrix will become more ill-conditioned. (More specifically, for $|\nabla u^n|\rightarrow\infty$ we have that $\lambda_1 \propto a_n \frac{1}{|\nabla u^n|^2}$ whereas $\lambda_2\ldots \lambda_d=a_n$; thus, the condition number of $B$, which is a multiplicative factor in the condition number of $A$ grows like ${\cal O}(|\nabla u^n|^2)$.) It is simple to verify with the current program that indeed multiplying the boundary values used in the current program by larger and larger values results in a problem that will ultimately no longer be solvable using the simple preconditioned CG method we use here.

    +

    From this expression, it is obvious that $B$ is symmetric, and so $A$ is symmetric as well. On the other hand, $B$ is also positive definite, which confers the same property onto $A$. This can be seen by noting that the vector $v_1 =
+\frac{\nabla u^n}{|\nabla u^n|}$ is an eigenvector of $B$ with eigenvalue $\lambda_1=a_n \left(1-\frac{|\nabla u^n|^2}{1+|\nabla u^n|^2}\right) > 0$ while all vectors $v_2\ldots v_d$ that are perpendicular to $v_1$ and each other are eigenvectors with eigenvalue $a_n$. Since all eigenvalues are positive, $B$ is positive definite and so is $A$. We can thus use the CG method for solving the Newton steps. (The fact that the matrix $A$ is symmetric and positive definite should not come as a surprise. It results from taking the derivative of an operator that results from taking the derivative of an energy functional: the minimal surface equation simply minimizes some non-quadratic energy. Consequently, the Newton matrix, as the matrix of second derivatives of a scalar energy, must be symmetric since the derivative with regard to the $i$th and $j$th degree of freedom should clearly commute. Likewise, if the energy functional is convex, then the matrix of second derivatives must be positive definite, and the direct calculation above simply reaffirms this.)

    +

    It is worth noting, however, that the positive definiteness degenerates for problems where $\nabla u$ becomes large. In other words, if we simply multiply all boundary values by 2, then to first order $u$ and $\nabla u$ will also be multiplied by two, but as a consequence the smallest eigenvalue of $B$ will become smaller and the matrix will become more ill-conditioned. (More specifically, for $|\nabla u^n|\rightarrow\infty$ we have that $\lambda_1 \propto a_n \frac{1}{|\nabla u^n|^2}$ whereas $\lambda_2\ldots \lambda_d=a_n$; thus, the condition number of $B$, which is a multiplicative factor in the condition number of $A$ grows like ${\cal O}(|\nabla u^n|^2)$.) It is simple to verify with the current program that indeed multiplying the boundary values used in the current program by larger and larger values results in a problem that will ultimately no longer be solvable using the simple preconditioned CG method we use here.

    Choice of step length and globalization

    -

    As stated above, Newton's method works by computing a direction $\delta u^n$ and then performing the update $u^{n+1} = u^{n}+\alpha^n
-\delta u^{n}$ with a step length $0 < \alpha^n \le 1$. It is a common observation that for strongly nonlinear models, Newton's method does not converge if we always choose $\alpha^n=1$ unless one starts with an initial guess $u^0$ that is sufficiently close to the solution $u$ of the nonlinear problem. In practice, we don't always have such an initial guess, and consequently taking full Newton steps (i.e., using $\alpha=1$) does frequently not work.

    -

    A common strategy therefore is to use a smaller step length for the first few steps while the iterate $u^n$ is still far away from the solution $u$ and as we get closer use larger values for $\alpha^n$ until we can finally start to use full steps $\alpha^n=1$ as we are close enough to the solution. The question is of course how to choose $\alpha^n$. There are basically two widely used approaches: line search and trust region methods.

    +

    As stated above, Newton's method works by computing a direction $\delta u^n$ and then performing the update $u^{n+1} = u^{n}+\alpha^n
+\delta u^{n}$ with a step length $0 < \alpha^n \le 1$. It is a common observation that for strongly nonlinear models, Newton's method does not converge if we always choose $\alpha^n=1$ unless one starts with an initial guess $u^0$ that is sufficiently close to the solution $u$ of the nonlinear problem. In practice, we don't always have such an initial guess, and consequently taking full Newton steps (i.e., using $\alpha=1$) does frequently not work.

    +

    A common strategy therefore is to use a smaller step length for the first few steps while the iterate $u^n$ is still far away from the solution $u$ and as we get closer use larger values for $\alpha^n$ until we can finally start to use full steps $\alpha^n=1$ as we are close enough to the solution. The question is of course how to choose $\alpha^n$. There are basically two widely used approaches: line search and trust region methods.

    In this program, we simply always choose the step length equal to 0.1. This makes sure that for the testcase at hand we do get convergence although it is clear that by not eventually reverting to full step lengths we forego the rapid, quadratic convergence that makes Newton's method so appealing. Obviously, this is a point one eventually has to address if the program was made into one that is meant to solve more realistic problems. We will comment on this issue some more in the results section, and use an even better approach in step-77.

    Summary of the algorithm and testcase

    Overall, the program we have here is not unlike step-6 in many regards. The layout of the main class is essentially the same. On the other hand, the driving algorithm in the run() function is different and works as follows:

    1. -

      Start with the function $u^{0}\equiv 0$ and modify it in such a way that the values of $u^0$ along the boundary equal the correct boundary values $g$ (this happens in MinimalSurfaceProblem::set_boundary_values). Set $n=0$.

      +

      Start with the function $u^{0}\equiv 0$ and modify it in such a way that the values of $u^0$ along the boundary equal the correct boundary values $g$ (this happens in MinimalSurfaceProblem::set_boundary_values). Set $n=0$.

    2. -

      Compute the Newton update by solving the system $A^{n}\;\delta
-  U^{n}=b^{n}$ with boundary condition $\delta u^{n}=0$ on $\partial \Omega$.

      +

      Compute the Newton update by solving the system $A^{n}\;\delta
+  U^{n}=b^{n}$ with boundary condition $\delta u^{n}=0$ on $\partial \Omega$.

    3. -

      Compute a step length $\alpha^n$. In this program, we always set $\alpha^n=0.1$. To make things easier to extend later on, this happens in a function of its own, namely in MinimalSurfaceProblem::determine_step_length. (The strategy of always choosing $\alpha^n=0.1$ is of course not optimal – we should choose a step length that works for a given search direction – but it requires a bit of work to do that. In the end, we leave these sorts of things to external packages: step-77 does that.)

      +

      Compute a step length $\alpha^n$. In this program, we always set $\alpha^n=0.1$. To make things easier to extend later on, this happens in a function of its own, namely in MinimalSurfaceProblem::determine_step_length. (The strategy of always choosing $\alpha^n=0.1$ is of course not optimal – we should choose a step length that works for a given search direction – but it requires a bit of work to do that. In the end, we leave these sorts of things to external packages: step-77 does that.)

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_16.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (861)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_16.html 2023-11-25 15:26:01.326527532 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_16.html 2023-11-25 15:26:01.326527532 +0100 @@ -139,7 +139,7 @@
      -

      The fine level in this mesh consists only of the degrees of freedom that are defined on the refined cells, but does not extend to that part of the domain that is not refined. While this guarantees that the overall effort grows as ${\cal O}(N)$ as necessary for optimal multigrid complexity, it leads to problems when defining where to smooth and what boundary conditions to pose for the operators defined on individual levels if the level boundary is not an external boundary. These questions are discussed in detail in the article cited above.

      +

      The fine level in this mesh consists only of the degrees of freedom that are defined on the refined cells, but does not extend to that part of the domain that is not refined. While this guarantees that the overall effort grows as ${\cal O}(N)$ as necessary for optimal multigrid complexity, it leads to problems when defining where to smooth and what boundary conditions to pose for the operators defined on individual levels if the level boundary is not an external boundary. These questions are discussed in detail in the article cited above.

      The testcase

      The problem we solve here is similar to step-6, with two main differences: first, the multigrid preconditioner, obviously. We also change the discontinuity of the coefficients such that the local assembler does not look more complicated than necessary.

      The commented program

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_18.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (2368)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_18.html 2023-11-25 15:26:01.363193455 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_18.html 2023-11-25 15:26:01.363193455 +0100 @@ -152,23 +152,23 @@

      Quasistatic elastic deformation

      Motivation of the model

      In general, time-dependent small elastic deformations are described by the elastic wave equation

      -\[
+<picture><source srcset=\[
   \rho \frac{\partial^2 \mathbf{u}}{\partial t^2}
   + c \frac{\partial \mathbf{u}}{\partial t}
   - \textrm{div}\  ( C \varepsilon(\mathbf{u})) = \mathbf{f}
   \qquad
   \textrm{in}\ \Omega,
-\] +\]" src="form_2939.png"/>

      -

      where $\mathbf{u}=\mathbf{u} (\mathbf{x},t)$ is the deformation of the body, $\rho$ and $c$ the density and attenuation coefficient, and $\mathbf{f}$ external forces. In addition, initial conditions

      -\[
+<p> where <picture><source srcset=$\mathbf{u}=\mathbf{u} (\mathbf{x},t)$ is the deformation of the body, $\rho$ and $c$ the density and attenuation coefficient, and $\mathbf{f}$ external forces. In addition, initial conditions

      +\[
   \mathbf{u}(\cdot, 0) = \mathbf{u}_0(\cdot)
   \qquad
   \textrm{on}\ \Omega,
-\] +\]" src="form_2942.png"/>

      and Dirichlet (displacement) or Neumann (traction) boundary conditions need to be specified for a unique solution:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \mathbf{u}(\mathbf{x},t) &=& \mathbf{d}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_D\subset\partial\Omega,
@@ -176,12 +176,12 @@
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N=\partial\Omega\backslash\Gamma_D.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2943.png"/>

      -

      In above formulation, $\varepsilon(\mathbf{u})= \frac 12 (\nabla \mathbf{u} + \nabla
-\mathbf{u}^T)$ is the symmetric gradient of the displacement, also called the strain. $C$ is a tensor of rank 4, called the stress-strain tensor (the inverse of the compliance tensor) that contains knowledge of the elastic strength of the material; its symmetry properties make sure that it maps symmetric tensors of rank 2 (“matrices” of dimension $d$, where $d$ is the spatial dimensionality) onto symmetric tensors of the same rank. We will comment on the roles of the strain and stress tensors more below. For the moment it suffices to say that we interpret the term $\textrm{div}\  ( C \varepsilon(\mathbf{u}))$ as the vector with components $\frac \partial{\partial x_j} C_{ijkl} \varepsilon(\mathbf{u})_{kl}$, where summation over indices $j,k,l$ is implied.

      -

      The quasistatic limit of this equation is motivated as follows: each small perturbation of the body, for example by changes in boundary condition or the forcing function, will result in a corresponding change in the configuration of the body. In general, this will be in the form of waves radiating away from the location of the disturbance. Due to the presence of the damping term, these waves will be attenuated on a time scale of, say, $\tau$. Now, assume that all changes in external forcing happen on times scales that are much larger than $\tau$. In that case, the dynamic nature of the change is unimportant: we can consider the body to always be in static equilibrium, i.e. we can assume that at all times the body satisfies

      -\begin{eqnarray*}
+<p> In above formulation, <picture><source srcset=$\varepsilon(\mathbf{u})= \frac 12 (\nabla \mathbf{u} + \nabla
+\mathbf{u}^T)$ is the symmetric gradient of the displacement, also called the strain. $C$ is a tensor of rank 4, called the stress-strain tensor (the inverse of the compliance tensor) that contains knowledge of the elastic strength of the material; its symmetry properties make sure that it maps symmetric tensors of rank 2 (“matrices” of dimension $d$, where $d$ is the spatial dimensionality) onto symmetric tensors of the same rank. We will comment on the roles of the strain and stress tensors more below. For the moment it suffices to say that we interpret the term $\textrm{div}\  ( C \varepsilon(\mathbf{u}))$ as the vector with components $\frac \partial{\partial x_j} C_{ijkl} \varepsilon(\mathbf{u})_{kl}$, where summation over indices $j,k,l$ is implied.

      +

      The quasistatic limit of this equation is motivated as follows: each small perturbation of the body, for example by changes in boundary condition or the forcing function, will result in a corresponding change in the configuration of the body. In general, this will be in the form of waves radiating away from the location of the disturbance. Due to the presence of the damping term, these waves will be attenuated on a time scale of, say, $\tau$. Now, assume that all changes in external forcing happen on times scales that are much larger than $\tau$. In that case, the dynamic nature of the change is unimportant: we can consider the body to always be in static equilibrium, i.e. we can assume that at all times the body satisfies

      +\begin{eqnarray*}
   - \textrm{div}\  ( C \varepsilon(\mathbf{u})) &=& \mathbf{f}(\mathbf{x},t)
   \qquad
   \textrm{in}\ \Omega,
@@ -193,13 +193,13 @@
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2949.png"/>

      -

      Note that the differential equation does not contain any time derivatives any more – all time dependence is introduced through boundary conditions and a possibly time-varying force function $\mathbf{f}(\mathbf{x},t)$. The changes in configuration can therefore be considered as being stationary instantaneously. An alternative view of this is that $t$ is not really a time variable, but only a time-like parameter that governs the evolution of the problem.

      +

      Note that the differential equation does not contain any time derivatives any more – all time dependence is introduced through boundary conditions and a possibly time-varying force function $\mathbf{f}(\mathbf{x},t)$. The changes in configuration can therefore be considered as being stationary instantaneously. An alternative view of this is that $t$ is not really a time variable, but only a time-like parameter that governs the evolution of the problem.

      While these equations are sufficient to describe small deformations, computing large deformations is a little more complicated and, in general, leads to nonlinear equations such as those treated in step-44. In the following, let us consider some of the tools one would employ when simulating problems in which the deformation becomes large.

      Note
      The model we will consider below is not founded on anything that would be mathematically sound: we will consider a model in which we produce a small deformation, deform the physical coordinates of the body by this deformation, and then consider the next loading step again as a linear problem. This isn't consistent, since the assumption of linearity implies that deformations are infinitesimal and so moving around the vertices of our mesh by a finite amount before solving the next linear problem is an inconsistent approach. We should therefore note that it is not surprising that the equations discussed below can't be found in the literature: The model considered here has little to do with reality! On the other hand, the implementation techniques we consider are very much what one would need to use when implementing a real model, as we will see in step-44.
      -

      To come back to defining our "artificial" model, let us first introduce a tensorial stress variable $\sigma$, and write the differential equations in terms of the stress:

      -\begin{eqnarray*}
+<p>To come back to defining our $\sigma$, and write the differential equations in terms of the stress:

      +\begin{eqnarray*}
   - \textrm{div}\  \sigma &=& \mathbf{f}(\mathbf{x},t)
   \qquad
   \textrm{in}\ \Omega(t),
@@ -211,30 +211,30 @@
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N=\partial\Omega(t)\backslash\Gamma_D.
-\end{eqnarray*} +\end{eqnarray*}" src="form_2951.png"/>

      -

      Note that these equations are posed on a domain $\Omega(t)$ that changes with time, with the boundary moving according to the displacements $\mathbf{u}(\mathbf{x},t)$ of the points on the boundary. To complete this system, we have to specify the incremental relationship between the stress and the strain, as follows:

      -\[
+<p> Note that these equations are posed on a domain <picture><source srcset=$\Omega(t)$ that changes with time, with the boundary moving according to the displacements $\mathbf{u}(\mathbf{x},t)$ of the points on the boundary. To complete this system, we have to specify the incremental relationship between the stress and the strain, as follows:

      +\[
   \dot\sigma = C \varepsilon (\dot{\mathbf{u}}),
   \qquad
   \qquad
   \textrm{[stress-strain]}
-\] +\]" src="form_2954.png"/>

      -

      where a dot indicates a time derivative. Both the stress $\sigma$ and the strain $\varepsilon(\mathbf{u})$ are symmetric tensors of rank 2.

      +

      where a dot indicates a time derivative. Both the stress $\sigma$ and the strain $\varepsilon(\mathbf{u})$ are symmetric tensors of rank 2.

      Time discretization

      -

      Numerically, this system is solved as follows: first, we discretize the time component using a backward Euler scheme. This leads to a discrete equilibrium of force at time step $n$:

      -\[
+<p>Numerically, this system is solved as follows: first, we discretize the time component using a backward Euler scheme. This leads to a discrete equilibrium of force at time step <picture><source srcset=$n$:

      +\[
   -\textrm{div}\  \sigma^n = f^n,
-\] +\]" src="form_2956.png"/>

      where

      -\[
+<picture><source srcset=\[
   \sigma^n = \sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n),
-\] +\]" src="form_2957.png"/>

      -

      and $\Delta \mathbf{u}^n$ the incremental displacement for time step $n$. In addition, we have to specify initial data $\mathbf{u}(\cdot,0)=\mathbf{u}_0$. This way, if we want to solve for the displacement increment, we have to solve the following system:

      -\begin{align*}
+<p> and <picture><source srcset=$\Delta \mathbf{u}^n$ the incremental displacement for time step $n$. In addition, we have to specify initial data $\mathbf{u}(\cdot,0)=\mathbf{u}_0$. This way, if we want to solve for the displacement increment, we have to solve the following system:

      +\begin{align*}
   - \textrm{div}\   C \varepsilon(\Delta\mathbf{u}^n) &= \mathbf{f} + \textrm{div}\  \sigma^{n-1}
   \qquad
   &&\textrm{in}\ \Omega(t_{n-1}),
@@ -246,11 +246,11 @@
   \mathbf{n} \ C \varepsilon(\Delta \mathbf{u}^n(\mathbf{x},t)) &= \mathbf{b}(\mathbf{x},t_n)-\mathbf{b}(\mathbf{x},t_{n-1})
   \qquad
   &&\textrm{on}\ \Gamma_N=\partial\Omega(t_{n-1})\backslash\Gamma_D.
-\end{align*} +\end{align*}" src="form_2960.png"/>

      -

      The weak form of this set of equations, which as usual is the basis for the finite element formulation, reads as follows: find $\Delta \mathbf{u}^n \in
-\{v\in H^1(\Omega(t_{n-1}))^d: v|_{\Gamma_D}=\mathbf{d}(\cdot,t_n) - \mathbf{d}(\cdot,t_{n-1})\}$ such that

      -\begin{align*}
+<p> The weak form of this set of equations, which as usual is the basis for the finite element formulation, reads as follows: find <picture><source srcset=$\Delta \mathbf{u}^n \in
+\{v\in H^1(\Omega(t_{n-1}))^d: v|_{\Gamma_D}=\mathbf{d}(\cdot,t_n) - \mathbf{d}(\cdot,t_{n-1})\}$ such that

      +\begin{align*}
   (C \varepsilon(\Delta\mathbf{u}^n), \varepsilon(\varphi) )_{\Omega(t_{n-1})}
   &=
   (\mathbf{f}, \varphi)_{\Omega(t_{n-1})}
@@ -262,12 +262,12 @@
   \\
   &\qquad\qquad
   \forall \varphi \in \{\mathbf{v}\in H^1(\Omega(t_{n-1}))^d: \mathbf{v}|_{\Gamma_D}=0\}.
-\end{align*} +\end{align*}" src="form_2962.png"/>

      -

      Using that $\sigma^{n-1} \mathbf{n}
+<p> Using that <picture><source srcset=$\sigma^{n-1} \mathbf{n}
             = [C \varepsilon(\mathbf{u}^{n-1})] \mathbf{n}
-            = \mathbf{b}(\mathbf x, t_{n-1})$, these equations can be simplified to

      -\begin{align*}
+            = \mathbf{b}(\mathbf x, t_{n-1})$, these equations can be simplified to

      +\begin{align*}
   (C \varepsilon(\Delta\mathbf{u}^n), \varepsilon(\varphi) )_{\Omega(t_{n-1})}
   &=
   (\mathbf{f}, \varphi)_{\Omega(t_{n-1})}
@@ -279,32 +279,32 @@
   \qquad
   \qquad
   \textrm{[linear-system]}
-\end{align*} +\end{align*}" src="form_2964.png"/>

      -

      We note that, for simplicity, in the program we will always assume that there are no boundary forces, i.e. $\mathbf{b} = 0$, and that the deformation of the body is driven by body forces $\mathbf{f}$ and prescribed boundary displacements $\mathbf{d}$ alone. It is also worth noting that when integrating by parts, we would get terms of the form $(C \varepsilon(\Delta\mathbf{u}^n), \nabla \varphi
-)_{\Omega(t_{n-1})}$, but that we replace them with the term involving the symmetric gradient $\varepsilon(\varphi)$ instead of $\nabla\varphi$. Due to the symmetry of $C$, the two terms are mathematically equivalent, but the symmetric version avoids the potential for round-off errors making the resulting matrix slightly non-symmetric.

      -

      The system at time step $n$, to be solved on the old domain $\Omega(t_{n-1})$, has exactly the form of a stationary elastic problem, and is therefore similar to what we have already implemented in previous example programs. We will therefore not comment on the space discretization beyond saying that we again use lowest order continuous finite elements.

      +

      We note that, for simplicity, in the program we will always assume that there are no boundary forces, i.e. $\mathbf{b} = 0$, and that the deformation of the body is driven by body forces $\mathbf{f}$ and prescribed boundary displacements $\mathbf{d}$ alone. It is also worth noting that when integrating by parts, we would get terms of the form $(C \varepsilon(\Delta\mathbf{u}^n), \nabla \varphi
+)_{\Omega(t_{n-1})}$, but that we replace them with the term involving the symmetric gradient $\varepsilon(\varphi)$ instead of $\nabla\varphi$. Due to the symmetry of $C$, the two terms are mathematically equivalent, but the symmetric version avoids the potential for round-off errors making the resulting matrix slightly non-symmetric.

      +

      The system at time step $n$, to be solved on the old domain $\Omega(t_{n-1})$, has exactly the form of a stationary elastic problem, and is therefore similar to what we have already implemented in previous example programs. We will therefore not comment on the space discretization beyond saying that we again use lowest order continuous finite elements.

      There are differences, however:

      1. We have to move (update) the mesh after each time step, in order to be able to solve the next time step on a new domain;

      2. -We need to know $\sigma^{n-1}$ to compute the next incremental displacement, i.e. we need to compute it at the end of the time step to make sure it is available for the next time step. Essentially, the stress variable is our window to the history of deformation of the body.
      3. +We need to know $\sigma^{n-1}$ to compute the next incremental displacement, i.e. we need to compute it at the end of the time step to make sure it is available for the next time step. Essentially, the stress variable is our window to the history of deformation of the body.

      These two operations are done in the functions move_mesh and update_quadrature_point_history in the program. While moving the mesh is only a technicality, updating the stress is a little more complicated and will be discussed in the next section.

      Updating the stress variable

      -

      As indicated above, we need to have the stress variable $\sigma^n$ available when computing time step $n+1$, and we can compute it using

      -\[
+<p>As indicated above, we need to have the stress variable <picture><source srcset=$\sigma^n$ available when computing time step $n+1$, and we can compute it using

      +\[
   \sigma^n = \sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n).
   \qquad
   \qquad
   \textrm{[stress-update]}
-\] +\]" src="form_2973.png"/>

      -

      There are, despite the apparent simplicity of this equation, two questions that we need to discuss. The first concerns the way we store $\sigma^n$: even if we compute the incremental updates $\Delta\mathbf{u}^n$ using lowest-order finite elements, then its symmetric gradient $\varepsilon(\Delta\mathbf{u}^n)$ is in general still a function that is not easy to describe. In particular, it is not a piecewise constant function, and on general meshes (with cells that are not rectangles parallel to the coordinate axes) or with non-constant stress-strain tensors $C$ it is not even a bi- or trilinear function. Thus, it is a priori not clear how to store $\sigma^n$ in a computer program.

      -

      To decide this, we have to see where it is used. The only place where we require the stress is in the term $(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}$. In practice, we of course replace this term by numerical quadrature:

      -\[
+<p> There are, despite the apparent simplicity of this equation, two questions that we need to discuss. The first concerns the way we store <picture><source srcset=$\sigma^n$: even if we compute the incremental updates $\Delta\mathbf{u}^n$ using lowest-order finite elements, then its symmetric gradient $\varepsilon(\Delta\mathbf{u}^n)$ is in general still a function that is not easy to describe. In particular, it is not a piecewise constant function, and on general meshes (with cells that are not rectangles parallel to the coordinate axes) or with non-constant stress-strain tensors $C$ it is not even a bi- or trilinear function. Thus, it is a priori not clear how to store $\sigma^n$ in a computer program.

      +

      To decide this, we have to see where it is used. The only place where we require the stress is in the term $(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}$. In practice, we of course replace this term by numerical quadrature:

      +\[
   (\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}
   =
   \sum_{K\subset {T}}
@@ -313,12 +313,12 @@
   \sum_{K\subset {T}}
   \sum_q
   w_q \ \sigma^{n-1}(\mathbf{x}_q) : \varepsilon(\varphi(\mathbf{x}_q),
-\] +\]" src="form_2977.png"/>

      -

      where $w_q$ are the quadrature weights and $\mathbf{x}_q$ the quadrature points on cell $K$. This should make clear that what we really need is not the stress $\sigma^{n-1}$ in itself, but only the values of the stress in the quadrature points on all cells. This, however, is a simpler task: we only have to provide a data structure that is able to hold one symmetric tensor of rank 2 for each quadrature point on all cells (or, since we compute in parallel, all quadrature points of all cells that the present MPI process “owns”). At the end of each time step we then only have to evaluate $\varepsilon(\Delta \mathbf{u}^n(\mathbf{x}_q))$, multiply it by the stress-strain tensor $C$, and use the result to update the stress $\sigma^n(\mathbf{x}_q)$ at quadrature point $q$.

      -

      The second complication is not visible in our notation as chosen above. It is due to the fact that we compute $\Delta u^n$ on the domain $\Omega(t_{n-1})$, and then use this displacement increment to both update the stress as well as move the mesh nodes around to get to $\Omega(t_n)$ on which the next increment is computed. What we have to make sure, in this context, is that moving the mesh does not only involve moving around the nodes, but also making corresponding changes to the stress variable: the updated stress is a variable that is defined with respect to the coordinate system of the material in the old domain, and has to be transferred to the new domain. The reason for this can be understood as follows: locally, the incremental deformation $\Delta\mathbf{u}$ can be decomposed into three parts, a linear translation (the constant part of the displacement increment field in the neighborhood of a point), a dilational component (that part of the gradient of the displacement field that has a nonzero divergence), and a rotation. A linear translation of the material does not affect the stresses that are frozen into it – the stress values are simply translated along. The dilational or compressional change produces a corresponding stress update. However, the rotational component does not necessarily induce a nonzero stress update (think, in 2d, for example of the situation where $\Delta\mathbf{u}=(y, -x)^T$, with which $\varepsilon(\Delta
-\mathbf{u})=0$). Nevertheless, if the material was prestressed in a certain direction, then this direction will be rotated along with the material. To this end, we have to define a rotation matrix $R(\Delta \mathbf{u}^n)$ that describes, in each point the rotation due to the displacement increments. It is not hard to see that the actual dependence of $R$ on $\Delta \mathbf{u}^n$ can only be through the curl of the displacement, rather than the displacement itself or its full gradient (as mentioned above, the constant components of the increment describe translations, its divergence the dilational modes, and the curl the rotational modes). Since the exact form of $R$ is cumbersome, we only state it in the program code, and note that the correct updating formula for the stress variable is then

      -\[
+<p> where <picture><source srcset=$w_q$ are the quadrature weights and $\mathbf{x}_q$ the quadrature points on cell $K$. This should make clear that what we really need is not the stress $\sigma^{n-1}$ in itself, but only the values of the stress in the quadrature points on all cells. This, however, is a simpler task: we only have to provide a data structure that is able to hold one symmetric tensor of rank 2 for each quadrature point on all cells (or, since we compute in parallel, all quadrature points of all cells that the present MPI process “owns”). At the end of each time step we then only have to evaluate $\varepsilon(\Delta \mathbf{u}^n(\mathbf{x}_q))$, multiply it by the stress-strain tensor $C$, and use the result to update the stress $\sigma^n(\mathbf{x}_q)$ at quadrature point $q$.

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_19.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1489)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_19.html 2023-11-25 15:26:01.399859374 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_19.html 2023-11-25 15:26:01.399859374 +0100 @@ -146,135 +146,135 @@

      The finite element method in general, and deal.II in particular, were invented to solve partial differential equations – in other words, to solve continuum mechanics problems. On the other hand, sometimes one wants to solve problems in which it is useful to track individual objects ("particles") and how their positions evolve. If this simply leads to a set of ordinary differential equations, for example if you want to track the positions of the planets in the solar system over time, then deal.II is clearly not the right tool. On the other hand, if this evolution is due to the interaction with the solution of partial differential equations, or if having a mesh to determine which particles interact with others (such as in the smoothed particle hydrodynamics (SPH) method), then deal.II has support for you.

      The case we will consider here is how electrically charged particles move through an electric field. As motivation, we will consider cathode rays: Electrons emitted by a heated piece of metal that is negatively charged (the "cathode"), and that are then accelerated by an electric field towards the positively charged electrode (the "anode"). The anode is typically ring-shaped so that the majority of electrons can fly through the hole in the form of an electron beam. In the olden times, they might then have illuminated the screen of a TV built from a cathode ray tube. Today, instead, electron beams are useful in X-ray machines, electron beam lithography, electron beam welding, and a number of other areas.

      The equations we will then consider are as follows: First, we need to describe the electric field. This is most easily accomplished by noting that the electric potential $V$ satisfied the equation

      -\[
+<picture><source srcset=\[
   -\epsilon_0 \Delta V = \rho
-\] +\]" src="form_3008.png"/>

      -

      where $\epsilon_0$ is the dielectric constant of vacuum, and $\rho$ is the charge density. This is augmented by boundary conditions that we will choose as follows:

      -\begin{align*}
+<p> where <picture><source srcset=$\epsilon_0$ is the dielectric constant of vacuum, and $\rho$ is the charge density. This is augmented by boundary conditions that we will choose as follows:

      +\begin{align*}
   V &= -V_0 && \text{on}\; \Gamma_\text{cathode}\subset\partial\Omega \\
   V &= +V_0 && \text{on}\; \Gamma_\text{anode}\subset\partial\Omega \\
   \epsilon\frac{\partial V}{\partial n} &= 0
    && \text{on}\; \partial\Omega\setminus\Gamma_\text{cathode}\setminus\Gamma_\text{anode}.
-\end{align*} +\end{align*}" src="form_3010.png"/>

      -

      In other words, we prescribe voltages $+V_0$ and $-V_0$ at the two electrodes and insulating (Neumann) boundary conditions elsewhere. Since the dynamics of the particles are purely due to the electric field $\mathbf E=\nabla V$, we could as well have prescribed $2V_0$ and $0$ at the two electrodes – all that matters is the voltage difference at the two electrodes.

      -

      Given this electric potential $V$ and the electric field $\mathbf E=\nabla V$, we can describe the trajectory of the $i$th particle using the differential equation

      -\[
+<p> In other words, we prescribe voltages <picture><source srcset=$+V_0$ and $-V_0$ at the two electrodes and insulating (Neumann) boundary conditions elsewhere. Since the dynamics of the particles are purely due to the electric field $\mathbf E=\nabla V$, we could as well have prescribed $2V_0$ and $0$ at the two electrodes – all that matters is the voltage difference at the two electrodes.

      +

      Given this electric potential $V$ and the electric field $\mathbf E=\nabla V$, we can describe the trajectory of the $i$th particle using the differential equation

      +\[
   m {\ddot {\mathbf x}}_i = e\mathbf E,
-\] +\]" src="form_3015.png"/>

      -

      where $m,e$ are the mass and electric charge of each particle. In practice, it is convenient to write this as a system of first-order differential equations in the position $\mathbf x$ and velocity $\mathbf v$:

      -\begin{align*}
+<p> where <picture><source srcset=$m,e$ are the mass and electric charge of each particle. In practice, it is convenient to write this as a system of first-order differential equations in the position $\mathbf x$ and velocity $\mathbf v$:

      +\begin{align*}
   {\dot {\mathbf v}}_i &= \frac{e\mathbf E}{m}, \\
   {\dot {\mathbf x}}_i &= {\mathbf v}_i.
-\end{align*} +\end{align*}" src="form_3017.png"/>

      -

      The deal.II class we will use to deal with particles, Particles::ParticleHandler, stores particles in a way so that the position $\mathbf x_i$ is part of the Particles::ParticleHandler data structures. (It stores particles sorted by cell they are in, and consequently needs to know where each particle is.) The velocity $\mathbf v_i$, on the other hand, is of no concern to Particles::ParticleHandler and consequently we will store it as a "property" of each particle that we will update in each time step. Properties can also be used to store any other quantity we might care about each particle: its charge, or if they were larger than just an electron, its color, mass, attitude in space, chemical composition, etc.

      +

      The deal.II class we will use to deal with particles, Particles::ParticleHandler, stores particles in a way so that the position $\mathbf x_i$ is part of the Particles::ParticleHandler data structures. (It stores particles sorted by cell they are in, and consequently needs to know where each particle is.) The velocity $\mathbf v_i$, on the other hand, is of no concern to Particles::ParticleHandler and consequently we will store it as a "property" of each particle that we will update in each time step. Properties can also be used to store any other quantity we might care about each particle: its charge, or if they were larger than just an electron, its color, mass, attitude in space, chemical composition, etc.

      There remain two things to discuss to complete the model: Where particles start and what the charge density $\rho$ is.

      -

      First, historically, cathode rays used very large electric fields to pull electrons out of the metal. This produces only a relatively small current. One can do better by heating the cathode: a statistical fraction of electrons in that case have enough thermal energy to leave the metal; the electric field then just has to be strong enough to pull them away from the attraction of their host body. We will model this in the following way: We will create a new particle if (i) the electric field points away from the electrode, i.e., if $\mathbf E \cdot \mathbf n < 0$ where $\mathbf n$ is the normal vector at a face pointing out of the domain (into the electrode), and (ii) the electric field exceeds a threshold value $|\mathbf E|\ge E_\text{threshold}$. This is surely not a sufficiently accurate model for what really happens, but is good enough for our current tutorial program.

      +

      First, historically, cathode rays used very large electric fields to pull electrons out of the metal. This produces only a relatively small current. One can do better by heating the cathode: a statistical fraction of electrons in that case have enough thermal energy to leave the metal; the electric field then just has to be strong enough to pull them away from the attraction of their host body. We will model this in the following way: We will create a new particle if (i) the electric field points away from the electrode, i.e., if $\mathbf E \cdot \mathbf n < 0$ where $\mathbf n$ is the normal vector at a face pointing out of the domain (into the electrode), and (ii) the electric field exceeds a threshold value $|\mathbf E|\ge E_\text{threshold}$. This is surely not a sufficiently accurate model for what really happens, but is good enough for our current tutorial program.

      Second, in principle we would have to model the charge density via

      -\[
+<picture><source srcset=\[
   \rho(\mathbf x) = \sum_i e\delta(\mathbf x-\mathbf x_i).
-\] +\]" src="form_3022.png"/>

      -

      The issue now is that in reality, a cathode ray tube in an old television yields a current of somewhere around a few milli-Amperes. In the much higher energy beams of particle accelerators, the current may only be a few nano-Ampere. But an Ampere is $6\times 10^{18}$ electrons flowing per second. Now, as you will see in the results section, we really only simulate a few microseconds ( $10^{-6}$ seconds), but that still results in very very large numbers of electrons – far more than we can hope to simulate with a program as small as the current one. As a consequence, let us presume that each particle represents $N$ electrons. Then the particle mass and charge are also $Nm$ and $Ne$ and the equations we have to solve are

      -\[
+<p> The issue now is that in reality, a cathode ray tube in an old television yields a current of somewhere around a few milli-Amperes. In the much higher energy beams of particle accelerators, the current may only be a few nano-Ampere. But an Ampere is <picture><source srcset=$6\times 10^{18}$ electrons flowing per second. Now, as you will see in the results section, we really only simulate a few microseconds ( $10^{-6}$ seconds), but that still results in very very large numbers of electrons – far more than we can hope to simulate with a program as small as the current one. As a consequence, let us presume that each particle represents $N$ electrons. Then the particle mass and charge are also $Nm$ and $Ne$ and the equations we have to solve are

      +\[
   (Nm) {\ddot {\mathbf x}}_i = (Ne)\mathbf E,
-\] +\]" src="form_3026.png"/>

      -

      which is of course exactly the same as above after dividing both sides by $N$. On the other hand, the charge density for these "clumps" of electrons is given by

      -\[
+<p> which is of course exactly the same as above after dividing both sides by <picture><source srcset=$N$. On the other hand, the charge density for these "clumps" of electrons is given by

      +\[
   \rho(\mathbf x) = \sum_i (Ne)\delta(\mathbf x-\mathbf x_i).
-\] +\]" src="form_3027.png"/>

      -

      It is this form that we will implement in the program, where $N$ is chosen rather large in the program to ensure that the particles actually affect the electric field. (This may not be realistic in practice: In most cases, there are just not enough electrons to actually affect the overall electric field. But realism is not our goal here.)

      -

      As a final thought about the model, one may wonder why the equation for the electric field (or, rather, the electric potential) has no time derivative whereas the equations for the electron positions do. In essence, this is a modeling assumption: We assume that the particles move so slowly that at any given time the electric field is in equilibrium. This is saying, in other words, that the velocity of the electrons is much less than the speed of light. In yet other words, we can rephrase this in terms of the electrode voltage $V_0$: Since every volt of electric potential accelerates electrons by approximately 600 km/s (neglecting relativistic effects), requiring $|\mathbf v_i\|\ll c$ is equivalent to saying that $2V_0 \ll 500 \text{V}$. Under this assumption (and the assumption that the total number of electrons is small), one can also neglect the creation of magnetic fields by the moving charges, which would otherwise also affect the movement of the electrons.

      +

      It is this form that we will implement in the program, where $N$ is chosen rather large in the program to ensure that the particles actually affect the electric field. (This may not be realistic in practice: In most cases, there are just not enough electrons to actually affect the overall electric field. But realism is not our goal here.)

      +

      As a final thought about the model, one may wonder why the equation for the electric field (or, rather, the electric potential) has no time derivative whereas the equations for the electron positions do. In essence, this is a modeling assumption: We assume that the particles move so slowly that at any given time the electric field is in equilibrium. This is saying, in other words, that the velocity of the electrons is much less than the speed of light. In yet other words, we can rephrase this in terms of the electrode voltage $V_0$: Since every volt of electric potential accelerates electrons by approximately 600 km/s (neglecting relativistic effects), requiring $|\mathbf v_i\|\ll c$ is equivalent to saying that $2V_0 \ll 500 \text{V}$. Under this assumption (and the assumption that the total number of electrons is small), one can also neglect the creation of magnetic fields by the moving charges, which would otherwise also affect the movement of the electrons.

      Time discretization

      The equations outlined above then form a set of coupled differential equations. Let us bring them all together in one place again to make that clear:

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   -\epsilon_0 \Delta V &= \sum_i e\delta(\mathbf x-\mathbf x_i)
   \\
   {\dot {\mathbf x}}_i &= {\mathbf v}_i,
   \\
   {\dot {\mathbf v}}_i &= \frac{e\mathbf E}{m} = \frac{e\mathbf \nabla V}{m}.
-\end{align*} +\end{align*}" src="form_3031.png"/>

      Because of the awkward dependence of the electric potential on the particle locations, we don't want to solve this as a coupled system but instead use a decoupled approach where we first solve for the potential in each time step and then the particle locations. (One could also do it the other way around, of course.) This is very much in the same spirit as we do in step-21, step-31, and step-32, to name just a few, and can all be understood in the context of the operator splitting methods discussed in step-58.

      -

      So, if we denote by an upper index $n$ the time step, and if we use a simple time discretization for the ODE, then this means that we have to solve the following set of equations in each time step:

      -\begin{align*}
+<p>So, if we denote by an upper index <picture><source srcset=$n$ the time step, and if we use a simple time discretization for the ODE, then this means that we have to solve the following set of equations in each time step:

      +\begin{align*}
   -\epsilon_0 \Delta V^{(n)} &= \sum_i e\delta(\mathbf x-\mathbf x_i^{(n-1)})
   \\
   \frac{{\mathbf v}_i^{(n)}-{\mathbf v}_i^{(n-1)}}{\Delta t} &= \frac{e\nabla V^{(n)}}{m}
   \\
   \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} &= {\mathbf v}_i^{(n)}.
-\end{align*} +\end{align*}" src="form_3032.png"/>

      -

      This scheme can be understood in the framework of operator splitting methods (specifically, the "Lie splitting" method) wherein a coupled system is solved by updating one variable at a time, using either the old values of other variables (e.g., using $\mathbf x_i^{(n-1)}$ in the first equation) or the values of variables that have already been updated in a previous sub-step (e.g., using $V^{(n)}$ in the second equation). There are of course many better ways to do a time discretization (for example the simple leapfrog scheme when updating the velocity, or more general Strang splitting methods for the coupled system) but this isn't the point of the tutorial program, and so we will be content with what we have here. (We will comment on a piece of this puzzle in the possibilities for extensions section of this program, however.)

      -

      There remains the question of how we should choose the time step size $\Delta t$. The limitation here is that the Particles::ParticleHandler class needs to keep track of which cell each particle is in. This is particularly an issue if we are running computations in parallel (say, in step-70) because in that case every process only stores those cells it owns plus one layer of "ghost cells". That's not relevant here, but in general we should make sure that over the course of each time step, a particle moves only from one cell to any of its immediate neighbors (face, edge, or vertex neighbors). If we can ensure that, then Particles::ParticleHandler is guaranteed to be able to figure out which cell a particle ends up in. To do this, a useful rule of thumb is that we should choose the time step so that for all particles the expected distance the particle moves by is less than one cell diameter:

      -\[
+<p> This scheme can be understood in the framework of operator splitting methods (specifically, the $\mathbf x_i^{(n-1)}$ in the first equation) or the values of variables that have already been updated in a previous sub-step (e.g., using $V^{(n)}$ in the second equation). There are of course many better ways to do a time discretization (for example the simple leapfrog scheme when updating the velocity, or more general Strang splitting methods for the coupled system) but this isn't the point of the tutorial program, and so we will be content with what we have here. (We will comment on a piece of this puzzle in the possibilities for extensions section of this program, however.)

      +

      There remains the question of how we should choose the time step size $\Delta t$. The limitation here is that the Particles::ParticleHandler class needs to keep track of which cell each particle is in. This is particularly an issue if we are running computations in parallel (say, in step-70) because in that case every process only stores those cells it owns plus one layer of "ghost cells". That's not relevant here, but in general we should make sure that over the course of each time step, a particle moves only from one cell to any of its immediate neighbors (face, edge, or vertex neighbors). If we can ensure that, then Particles::ParticleHandler is guaranteed to be able to figure out which cell a particle ends up in. To do this, a useful rule of thumb is that we should choose the time step so that for all particles the expected distance the particle moves by is less than one cell diameter:

      +\[
   \Delta t \le \frac{h_i}{\|\mathbf v_i\|} \qquad\qquad \forall i,
-\] +\]" src="form_3035.png"/>

      or equivalently

      -\[
+<picture><source srcset=\[
   \Delta t \le \min_i \frac{h_i}{\|\mathbf v_i\|}.
-\] +\]" src="form_3036.png"/>

      -

      Here, $h_i$ is the length of the shortest edge of the cell on which particle $i$ is located – in essence, a measure of the size of a cell.

      +

      Here, $h_i$ is the length of the shortest edge of the cell on which particle $i$ is located – in essence, a measure of the size of a cell.

      On the other hand, a particle might already be at the boundary of one cell and the neighboring cell might be once further refined. So then the time to cross that neighboring cell would actually be half the amount above, suggesting

      -\[
+<picture><source srcset=\[
   \Delta t \le \min_i \frac{\tfrac 12 h_i}{\|\mathbf v_i\|}.
-\] +\]" src="form_3037.png"/>

      But even that is not good enough: The formula above updates the particle positions in each time using the formula

      -\[
+<picture><source srcset=\[
 \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} = {\mathbf v}_i^{(n)},
-\] +\]" src="form_3038.png"/>

      -

      that is, using the current velocity ${\mathbf v}_i^{n}$. But we don't have the current velocity yet at the time when we need to choose $\Delta t$ – which is after we have updated the potential $V^{(n)}$ but before we update the velocity from ${\mathbf v}_i^{(n-1)}$ to ${\mathbf v}_i^{(n)}$. All we have is ${\mathbf v}_i^{(n-1)}$. So we need an additional safety factor for our final choice:

      -\[
+<p> that is, using the <em>current</em> velocity <picture><source srcset=${\mathbf v}_i^{n}$. But we don't have the current velocity yet at the time when we need to choose $\Delta t$ – which is after we have updated the potential $V^{(n)}$ but before we update the velocity from ${\mathbf v}_i^{(n-1)}$ to ${\mathbf v}_i^{(n)}$. All we have is ${\mathbf v}_i^{(n-1)}$. So we need an additional safety factor for our final choice:

      +\[
   \Delta t^{(n)} =
   c_\text{safety} \min_i \frac{\tfrac 12 h_i}{\|\mathbf v_i^{(n-1)}\|}.
-\] +\]" src="form_3042.png"/>

      -

      How large should $c_\text{safety}$ be? That depends on how much of underestimate $\|\mathbf v_i^{(n-1)}\|$ might be compared to $\|\mathbf v_i^{(n)}\|$, and that is actually quite easy to assess: A particle created in one time step with zero velocity will roughly pick up equal velocity increments in each successive time step if the electric field it encounters along the way were roughly constant. So the maximal difference between $\|\mathbf v_i^{(n-1)}\|$ and $\|\mathbf v_i^{(n)}\|$ would be a factor of two. As a consequence, we will choose $c_\text{safety}=0.5$.

      +

      How large should $c_\text{safety}$ be? That depends on how much of underestimate $\|\mathbf v_i^{(n-1)}\|$ might be compared to $\|\mathbf v_i^{(n)}\|$, and that is actually quite easy to assess: A particle created in one time step with zero velocity will roughly pick up equal velocity increments in each successive time step if the electric field it encounters along the way were roughly constant. So the maximal difference between $\|\mathbf v_i^{(n-1)}\|$ and $\|\mathbf v_i^{(n)}\|$ would be a factor of two. As a consequence, we will choose $c_\text{safety}=0.5$.

      There is only one other case we ought to consider: What happens in the very first time step? There, any particles to be moved along have just been created, but they have a zero velocity. So we don't know what velocity we should choose for them. Of course, in all other time steps there are also particles that have just been created, but in general, the particles with the highest velocity limit the time step size and so the newly created particles with their zero velocity don't matter. But if we only have such particles?

      -

      In that case, we can use the following approximation: If a particle starts at $\mathbf v^{(0)}=0$, then the update formula tells us that

      -\[
+<p>In that case, we can use the following approximation: If a particle starts at <picture><source srcset=$\mathbf v^{(0)}=0$, then the update formula tells us that

      +\[
   {\mathbf v}_i^{(1)} = \frac{e\nabla V^{(1)}}{m} \Delta t,
-\] +\]" src="form_3048.png"/>

      and consequently

      -\[
+<picture><source srcset=\[
     \frac{{\mathbf x}_i^{(1)}-{\mathbf x}_i^{(0)}}{\Delta t} = {\mathbf v}_i^{(1)},
-\] +\]" src="form_3049.png"/>

      which we can write as

      -\[
+<picture><source srcset=\[
     {\mathbf x}_i^{(1)} - {\mathbf x}_i^{(0)} = \frac{e\nabla V^{(1)}}{m} \Delta t^2.
-\] +\]" src="form_3050.png"/>

      -

      Not wanting to move a particle by more than $\frac 12 h_i$ then implies that we should choose the time step as

      -\[
+<p> Not wanting to move a particle by more than <picture><source srcset=$\frac 12 h_i$ then implies that we should choose the time step as

      +\[
   \Delta t
   \le
   \min_i
   \sqrt{ \frac{h_i m}{e \|\nabla V^{(1)}\| }}.
-\] +\]" src="form_3052.png"/>

      Using the same argument about neighboring cells possibly being smaller by a factor of two then leads to the final formula for time step zero:

      -\[
+<picture><source srcset=\[
   \Delta t
   =
   \min_i
   \sqrt{ \frac{\frac 12 h_i m}{e \|\nabla V^{(1)}\| } }.
-\] +\]" src="form_3053.png"/>

      -

      Strictly speaking, we would have to evaluate the electric potential $V^{(1)}$ at the location of each particle, but a good enough approximation is to use the maximum of the values at the vertices of the respective cell. (Why the vertices and not the midpoint? Because the gradient of the solution of the Laplace equation, i.e., the electric field, is largest in corner singularities which are located at the vertices of cells.) This has the advantage that we can make good use of the FEValues functionality which can recycle pre-computed material as long as the quadrature points are the same from one cell to the next.

      -

      We could always run this kind of scheme to estimate the difference between $\mathbf v_i^{(n-1)}$ and $\mathbf v_i^{(n)}$, but it relies on evaluating the electric field $\mathbf E$ on each cell, and that is expensive. As a consequence, we will limit this approach to the very first time step.

      +

      Strictly speaking, we would have to evaluate the electric potential $V^{(1)}$ at the location of each particle, but a good enough approximation is to use the maximum of the values at the vertices of the respective cell. (Why the vertices and not the midpoint? Because the gradient of the solution of the Laplace equation, i.e., the electric field, is largest in corner singularities which are located at the vertices of cells.) This has the advantage that we can make good use of the FEValues functionality which can recycle pre-computed material as long as the quadrature points are the same from one cell to the next.

      +

      We could always run this kind of scheme to estimate the difference between $\mathbf v_i^{(n-1)}$ and $\mathbf v_i^{(n)}$, but it relies on evaluating the electric field $\mathbf E$ on each cell, and that is expensive. As a consequence, we will limit this approach to the very first time step.

      Spatial discretization

      -

      Having discussed the time discretization, the discussion of the spatial discretization is going to be short: We use quadratic finite elements, i.e., the space $Q_2$, to approximate the electric potential $V$. The mesh is adapted a couple of times during the initial time step. All of this is entirely standard if you have read step-6, and the implementation does not provide for any kind of surprise.

      +

      Having discussed the time discretization, the discussion of the spatial discretization is going to be short: We use quadratic finite elements, i.e., the space $Q_2$, to approximate the electric potential $V$. The mesh is adapted a couple of times during the initial time step. All of this is entirely standard if you have read step-6, and the implementation does not provide for any kind of surprise.

      Dealing with particles programmatically

      Adding and moving particles is, in practice, not very difficult in deal.II. To add one, the create_particles() function of this program simply uses a code snippet of the following form:

      new_particle.set_location(location);
      @@ -287,7 +287,7 @@
      void set_reference_location(const Point< dim > &new_reference_location)
      Definition: particle.h:542
      void set_id(const types::particle_index &new_id)
      Definition: particle.h:569
      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_2.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (2404)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_2.html 2023-11-25 15:26:01.416525702 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_2.html 2023-11-25 15:26:01.416525702 +0100 @@ -117,14 +117,14 @@

    Introduction

    Note
    The material presented here is also discussed in video lecture 9. (All video lectures are also available here.)
    -

    The finite element method is based on approximating the solution $u$ of a differential equation such as $-\Delta u=f$ by a function $u_h$ that is "piecewise" polynomial; that is, we subdivide the domain $\Omega$ on which the equation is posed into small cells that in the documentation we will generally denote by the symbol $K$. On each cell $K$, the approximating function $u_h$ we seek is then a polynomial. (Or, strictly speaking, a function that is the image of a polynomial from a "reference cell", but let us not make things more complicated than necessary for now.)

    -

    In the previous tutorial program (in step-1), we showed how we should think of the subdivision of the domain into cells as a "mesh" represented by the Triangulation class, and how this looks like in code. In the current tutorial program, we now show how one represents piecewise polynomial functions through the concept of degrees of freedom defined on this mesh. For this example, we will use the lowest order ( $Q_1$) finite elements, that is the approximating function $u_h$ we are looking for will be "bi-linear" on each quadrilateral cell $K$ of the mesh. (They would be linear if we would work on triangles.)

    -

    In practice, we represent the function as a linear combination of shape functions $\varphi_j(\mathbf x)$ with multipliers $U_j$ that we call the "degrees of freedom". For the bi-linear functions we consider here, each of these shape functions and degrees of freedom is associated with a vertex of the mesh. Later examples will demonstrate higher order elements where degrees of freedom are not necessarily associated with vertices any more, but can be associated with edges, faces, or cells.

    -

    The term "degree of freedom" is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf x) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf
-x)$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problems is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
-V_h$). In other words, all we say here is that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf x)$ we have used above in the expansion of $u_h(\mathbf x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh.

    +

    The finite element method is based on approximating the solution $u$ of a differential equation such as $-\Delta u=f$ by a function $u_h$ that is "piecewise" polynomial; that is, we subdivide the domain $\Omega$ on which the equation is posed into small cells that in the documentation we will generally denote by the symbol $K$. On each cell $K$, the approximating function $u_h$ we seek is then a polynomial. (Or, strictly speaking, a function that is the image of a polynomial from a "reference cell", but let us not make things more complicated than necessary for now.)

    +

    In the previous tutorial program (in step-1), we showed how we should think of the subdivision of the domain into cells as a "mesh" represented by the Triangulation class, and how this looks like in code. In the current tutorial program, we now show how one represents piecewise polynomial functions through the concept of degrees of freedom defined on this mesh. For this example, we will use the lowest order ( $Q_1$) finite elements, that is the approximating function $u_h$ we are looking for will be "bi-linear" on each quadrilateral cell $K$ of the mesh. (They would be linear if we would work on triangles.)

    +

    In practice, we represent the function as a linear combination of shape functions $\varphi_j(\mathbf x)$ with multipliers $U_j$ that we call the "degrees of freedom". For the bi-linear functions we consider here, each of these shape functions and degrees of freedom is associated with a vertex of the mesh. Later examples will demonstrate higher order elements where degrees of freedom are not necessarily associated with vertices any more, but can be associated with edges, faces, or cells.

    +

    The term "degree of freedom" is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf x) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf
+x)$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problems is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
+V_h$). In other words, all we say here is that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf x)$ we have used above in the expansion of $u_h(\mathbf x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh.

    Enumerating degrees of freedom

    -

    Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher order elements, one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. In other words, the enumeration of degrees of freedom is an entirely separate thing from the indices we use for vertices. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler.

    +

    Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher order elements, one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. In other words, the enumeration of degrees of freedom is an entirely separate thing from the indices we use for vertices. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler.

    Defining degrees of freedom ("DoF"s in short) on a mesh is, in practice, a rather simple task, since the library does all the work for you. Essentially, all you have to do is create a finite element object (from one of the many finite element classes deal.II already has, see for example the Finite element space descriptions documentation) and give it to a DoFHandler object through the DoFHandler::distribute_dofs() function ("distributing DoFs" is the term we use to describe the process of enumerating the basis functions as discussed above). The DoFHandler is a class that knows which degrees of freedom live where, i.e., it can answer questions like "how many degrees of freedom are there globally" and "on this cell, give me the global indices of the shape functions that live here". This is the sort of information you need when determining how big your system matrix should be, and when copying the contributions of a single cell into the global matrix.

    The first task of the current program is therefore to take a mesh and a finite element, and enumerate the degrees of freedom. In the current context, this means simply giving each vertex of the mesh a DoF index. Once that has happened, we will output in a picture which vertex ended up with which DoF index. You can find the corresponding pictures in the results section of this tutorial.

    @@ -133,11 +133,11 @@

    The next step would then be to compute a matrix and right hand side corresponding to a particular differential equation using this finite element and mesh. We will keep this step for the step-3 program and rather talk about one practical aspect of a finite element program, namely that finite element matrices are always very sparse: almost all entries in these matrices are zero.

    To be more precise, we say that a matrix is sparse if the number of nonzero entries per row in the matrix is bounded by a number that is independent of the overall number of degrees of freedom. For example, the simple 5-point stencil of a finite difference approximation of the Laplace equation leads to a sparse matrix since the number of nonzero entries per row is five, and therefore independent of the total size of the matrix. For more complicated problems – say, the Stokes problem of step-22 – and in particular in 3d, the number of entries per row may be several hundred. But the important point is that this number is independent of the overall size of the problem: If you refine the mesh, the maximal number of unknowns per row remains the same.

    Sparsity is one of the distinguishing feature of the finite element method compared to, say, approximating the solution of a partial differential equation using a Taylor expansion and matching coefficients, or using a Fourier basis.

    -

    In practical terms, it is the sparsity of matrices that enables us to solve problems with millions or billions of unknowns. To understand this, note that a matrix with $N$ rows, each with a fixed upper bound for the number of nonzero entries, requires ${\cal O}(N)$ memory locations for storage, and a matrix-vector multiplication also requires only ${\cal O}(N)$ operations. Consequently, if we had a linear solver that requires only a fixed number of matrix-vector multiplications to come up with the solution of a linear system with this matrix, then we would have a solver that can find the values of all $N$ unknowns with optimal complexity, i.e., with a total of ${\cal O}(N)$ operations. It is clear that this wouldn't be possible if the matrix were not sparse (because then the number of entries in the matrix would have to be ${\cal O}(N^s)$ with some $s>1$, and doing a fixed number of matrix-vector products would take ${\cal O}(N^s)$ operations), but it also requires very specialized solvers such as multigrid methods to satisfy the requirement that the solution requires only a fixed number of matrix-vector multiplications. We will frequently look at the question of what solver to use in the remaining programs of this tutorial.

    -

    The sparsity is generated by the fact that finite element shape functions are defined locally on individual cells, rather than globally, and that the local differential operators in the bilinear form only couple shape functions whose support overlaps. (The "support" of a function is the area where it is nonzero. For the finite element method, the support of a shape function is generally the cells adjacent to the vertex, edge, or face it is defined on.) In other words, degrees of freedom $i$ and $j$ that are not defined on the same cell do not overlap, and consequently the matrix entry $A_{ij}$ will be zero. (In some cases such as the Discontinuous Galerkin method, shape functions may also connect to neighboring cells through face integrals. But finite element methods do not generally couple shape functions beyond the immediate neighbors of a cell on which the function is defined.)

    +

    In practical terms, it is the sparsity of matrices that enables us to solve problems with millions or billions of unknowns. To understand this, note that a matrix with $N$ rows, each with a fixed upper bound for the number of nonzero entries, requires ${\cal O}(N)$ memory locations for storage, and a matrix-vector multiplication also requires only ${\cal O}(N)$ operations. Consequently, if we had a linear solver that requires only a fixed number of matrix-vector multiplications to come up with the solution of a linear system with this matrix, then we would have a solver that can find the values of all $N$ unknowns with optimal complexity, i.e., with a total of ${\cal O}(N)$ operations. It is clear that this wouldn't be possible if the matrix were not sparse (because then the number of entries in the matrix would have to be ${\cal O}(N^s)$ with some $s>1$, and doing a fixed number of matrix-vector products would take ${\cal O}(N^s)$ operations), but it also requires very specialized solvers such as multigrid methods to satisfy the requirement that the solution requires only a fixed number of matrix-vector multiplications. We will frequently look at the question of what solver to use in the remaining programs of this tutorial.

    +

    The sparsity is generated by the fact that finite element shape functions are defined locally on individual cells, rather than globally, and that the local differential operators in the bilinear form only couple shape functions whose support overlaps. (The "support" of a function is the area where it is nonzero. For the finite element method, the support of a shape function is generally the cells adjacent to the vertex, edge, or face it is defined on.) In other words, degrees of freedom $i$ and $j$ that are not defined on the same cell do not overlap, and consequently the matrix entry $A_{ij}$ will be zero. (In some cases such as the Discontinuous Galerkin method, shape functions may also connect to neighboring cells through face integrals. But finite element methods do not generally couple shape functions beyond the immediate neighbors of a cell on which the function is defined.)

    How degrees of freedom are enumerated

    By default, the DoFHandler class enumerates degrees of freedom on a mesh using an algorithm that is difficult to describe and leads to results that do look right if you know what it is doing but otherwise appears rather random; consequently, the sparsity pattern is also not optimized for any particular purpose. To show this, the code below will demonstrate a simple way to output the "sparsity pattern" that corresponds to a DoFHandler, i.e., an object that represents all of the potentially nonzero elements of a matrix one may build when discretizing a partial differential equation on a mesh and its DoFHandler. This lack of structure in the sparsity pattern will be apparent from the pictures we show below.

    -

    For most applications and algorithms, the exact way in which degrees of freedom are numbered does not matter. For example, the Conjugate Gradient method we use to solve linear systems does not care. On the other hand, some algorithms do care: in particular, some preconditioners such as SSOR will work better if they can walk through degrees of freedom in a particular order, and it would be nice if we could just sort them in such a way that SSOR can iterate through them from zero to $N$ in this order. Other examples include computing incomplete LU or Cholesky factorizations, or if we care about the block structure of matrices (see step-20 for an example). deal.II therefore has algorithms that can re-enumerate degrees of freedom in particular ways in namespace DoFRenumbering. Renumbering can be thought of as choosing a different, permuted basis of the finite element space. The sparsity pattern and matrices that result from this renumbering are therefore also simply a permutation of rows and columns compared to the ones we would get without explicit renumbering.

    +

    For most applications and algorithms, the exact way in which degrees of freedom are numbered does not matter. For example, the Conjugate Gradient method we use to solve linear systems does not care. On the other hand, some algorithms do care: in particular, some preconditioners such as SSOR will work better if they can walk through degrees of freedom in a particular order, and it would be nice if we could just sort them in such a way that SSOR can iterate through them from zero to $N$ in this order. Other examples include computing incomplete LU or Cholesky factorizations, or if we care about the block structure of matrices (see step-20 for an example). deal.II therefore has algorithms that can re-enumerate degrees of freedom in particular ways in namespace DoFRenumbering. Renumbering can be thought of as choosing a different, permuted basis of the finite element space. The sparsity pattern and matrices that result from this renumbering are therefore also simply a permutation of rows and columns compared to the ones we would get without explicit renumbering.

    In the program below, we will use the algorithm of Cuthill and McKee to do so. We will show the sparsity pattern for both the original enumeration of degrees of freedom and of the renumbered version below, in the results section.

    The commented program

    The first few includes are just like in the previous program, so do not require additional comments:

    @@ -275,7 +275,7 @@
     

    Renumbering of DoFs

    In the sparsity pattern produced above, the nonzero entries extended quite far off from the diagonal. For some algorithms, for example for incomplete LU decompositions or Gauss-Seidel preconditioners, this is unfavorable, and we will show a simple way how to improve this situation.

    -

    Remember that for an entry $(i,j)$ in the matrix to be nonzero, the supports of the shape functions i and j needed to intersect (otherwise in the integral, the integrand would be zero everywhere since either the one or the other shape function is zero at some point). However, the supports of shape functions intersected only if they were adjacent to each other, so in order to have the nonzero entries clustered around the diagonal (where $i$ equals $j$), we would like to have adjacent shape functions to be numbered with indices (DoF numbers) that differ not too much.

    +

    Remember that for an entry $(i,j)$ in the matrix to be nonzero, the supports of the shape functions i and j needed to intersect (otherwise in the integral, the integrand would be zero everywhere since either the one or the other shape function is zero at some point). However, the supports of shape functions intersected only if they were adjacent to each other, so in order to have the nonzero entries clustered around the diagonal (where $i$ equals $j$), we would like to have adjacent shape functions to be numbered with indices (DoF numbers) that differ not too much.

    This can be accomplished by a simple front marching algorithm, where one starts at a given vertex and gives it the index zero. Then, its neighbors are numbered successively, making their indices close to the original one. Then, their neighbors, if not yet numbered, are numbered, and so on.

    One algorithm that adds a little bit of sophistication along these lines is the one by Cuthill and McKee. We will use it in the following function to renumber the degrees of freedom such that the resulting sparsity pattern is more localized around the diagonal. The only interesting part of the function is the first call to DoFRenumbering::Cuthill_McKee, the rest is essentially as before:

      void renumber_dofs(DoFHandler<2> &dof_handler)
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_20.html differs (JavaScript source, ASCII text, with very long lines (1868)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_20.html 2023-11-25 15:26:01.449858358 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_20.html 2023-11-25 15:26:01.449858358 +0100 @@ -152,13 +152,13 @@ p &=& g \qquad {\textrm{on}\ }\partial\Omega. \end{eqnarray*}" src="form_3080.png"/>

    -

    $K({\mathbf x})$ is assumed to be uniformly positive definite, i.e., there is $\alpha>0$ such that the eigenvalues $\lambda_i({\mathbf x})$ of $K(x)$ satisfy $\lambda_i({\mathbf x})\ge \alpha$. The use of the symbol $p$ instead of the usual $u$ for the solution variable will become clear in the next section.

    +

    $K({\mathbf x})$ is assumed to be uniformly positive definite, i.e., there is $\alpha>0$ such that the eigenvalues $\lambda_i({\mathbf x})$ of $K(x)$ satisfy $\lambda_i({\mathbf x})\ge \alpha$. The use of the symbol $p$ instead of the usual $u$ for the solution variable will become clear in the next section.

    After discussing the equation and the formulation we are going to use to solve it, this introduction will cover the use of block matrices and vectors, the definition of solvers and preconditioners, and finally the actual test case we are going to solve.

    We are going to extend this tutorial program in step-21 to solve not only the mixed Laplace equation, but add another equation that describes the transport of a mixture of two fluids.

    The equations covered here fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems module.

    The equations

    In the form above, the Poisson equation (i.e., the Laplace equation with a nonzero right hand side) is generally considered a good model equation for fluid flow in porous media. Of course, one typically models fluid flow through the Navier-Stokes equations or, if fluid velocities are slow or the viscosity is large, the Stokes equations (which we cover in step-22). In the first of these two models, the forces that act are inertia and viscous friction, whereas in the second it is only viscous friction – i.e., forces that one fluid particle exerts on a nearby one. This is appropriate if you have free flow in a large domain, say a pipe, a river, or in the air. On the other hand, if the fluid is confined in pores, then friction forces exerted by the pore walls on the fluid become more and more important and internal viscous friction becomes less and less important. Modeling this then first leads to the Brinkman model if both effects are important, and in the limit of very small pores to the Darcy equations. The latter is just a different name for the Poisson or Laplace equation, connotating it with the area to which one wants to apply it: slow flow in a porous medium. In essence it says that the velocity is proportional to the negative pressure gradient that drives the fluid through the porous medium.

    -

    The Darcy equation models this pressure that drives the flow. (Because the solution variable is a pressure, we here use the name $p$ instead of the name $u$ more commonly used for the solution of partial differential equations.) Typical applications of this view of the Laplace equation are then modeling groundwater flow, or the flow of hydrocarbons in oil reservoirs. In these applications, $K$ is the permeability tensor, i.e., a measure for how much resistance the soil or rock matrix asserts on the fluid flow.

    +

    The Darcy equation models this pressure that drives the flow. (Because the solution variable is a pressure, we here use the name $p$ instead of the name $u$ more commonly used for the solution of partial differential equations.) Typical applications of this view of the Laplace equation are then modeling groundwater flow, or the flow of hydrocarbons in oil reservoirs. In these applications, $K$ is the permeability tensor, i.e., a measure for how much resistance the soil or rock matrix asserts on the fluid flow.

    In the applications named above, a desirable feature for a numerical scheme is that it should be locally conservative, i.e., that whatever flows into a cell also flows out of it (or the difference is equal to the integral over the source terms over each cell, if the sources are nonzero). However, as it turns out, the usual discretizations of the Laplace equation (such as those used in step-3, step-4, or step-6) do not satisfy this property. But, one can achieve this by choosing a different formulation of the problem and a particular combination of finite element spaces.

    Formulation, weak form, and discrete problem

    To this end, one first introduces a second variable, called the velocity, ${\mathbf u}=-K\nabla p$. By its definition, the velocity is a vector in the negative direction of the pressure gradient, multiplied by the permeability tensor. If the permeability tensor is proportional to the unit matrix, this equation is easy to understand and intuitive: the higher the permeability, the higher the velocity; and the velocity is proportional to the gradient of the pressure, going from areas of high pressure to areas of low pressure (thus the negative sign).

    @@ -187,15 +187,15 @@ \end{eqnarray*}" src="form_3091.png"/>

    Here, ${\mathbf n}$ is the outward normal vector at the boundary. Note how in this formulation, Dirichlet boundary values of the original problem are incorporated in the weak form.

    -

    To be well-posed, we have to look for solutions and test functions in the space $H({\textrm{div}})=\{{\mathbf w}\in L^2(\Omega)^d:\ {\textrm{div}}\ {\mathbf w}\in L^2\}$ for $\mathbf u$, $\mathbf v$, and $L^2$ for $p,q$. It is a well-known fact stated in almost every book on finite element theory that if one chooses discrete finite element spaces for the approximation of ${\mathbf u},p$ inappropriately, then the resulting discrete problem is instable and the discrete solution will not converge to the exact solution. (Some details on the problem considered here – which falls in the class of "saddle-point problems" – can be found on the Wikipedia page on the Ladyzhenskaya-Babuska-Brezzi (LBB) condition.)

    -

    To overcome this, a number of different finite element pairs for ${\mathbf u},p$ have been developed that lead to a stable discrete problem. One such pair is to use the Raviart-Thomas spaces $RT(k)$ for the velocity ${\mathbf u}$ and discontinuous elements of class $DQ(k)$ for the pressure $p$. For details about these spaces, we refer in particular to the book on mixed finite element methods by Brezzi and Fortin, but many other books on the theory of finite elements, for example the classic book by Brenner and Scott, also state the relevant results. In any case, with appropriate choices of function spaces, the discrete formulation reads as follows: Find ${\mathbf
+<p>To be well-posed, we have to look for solutions and test functions in the space <picture><source srcset=$H({\textrm{div}})=\{{\mathbf w}\in L^2(\Omega)^d:\ {\textrm{div}}\ {\mathbf w}\in L^2\}$ for $\mathbf u$, $\mathbf v$, and $L^2$ for $p,q$. It is a well-known fact stated in almost every book on finite element theory that if one chooses discrete finite element spaces for the approximation of ${\mathbf u},p$ inappropriately, then the resulting discrete problem is instable and the discrete solution will not converge to the exact solution. (Some details on the problem considered here – which falls in the class of "saddle-point problems" – can be found on the Wikipedia page on the Ladyzhenskaya-Babuska-Brezzi (LBB) condition.)

    +

    To overcome this, a number of different finite element pairs for ${\mathbf u},p$ have been developed that lead to a stable discrete problem. One such pair is to use the Raviart-Thomas spaces $RT(k)$ for the velocity ${\mathbf u}$ and discontinuous elements of class $DQ(k)$ for the pressure $p$. For details about these spaces, we refer in particular to the book on mixed finite element methods by Brezzi and Fortin, but many other books on the theory of finite elements, for example the classic book by Brenner and Scott, also state the relevant results. In any case, with appropriate choices of function spaces, the discrete formulation reads as follows: Find ${\mathbf
 u}_h,p_h$ so that

    \begin{eqnarray*}
   A(\{{\mathbf u}_h,p_h\},\{{\mathbf v}_h,q_h\}) = F(\{{\mathbf v}_h,q_h\})
   \qquad\qquad \forall {\mathbf v}_h,q_h.
 \end{eqnarray*}

    -

    Before continuing, let us briefly pause and show that the choice of function spaces above provides us with the desired local conservation property. In particular, because the pressure space consists of discontinuous piecewise polynomials, we can choose the test function $q$ as the function that is equal to one on any given cell $K$ and zero everywhere else. If we also choose ${\mathbf v}=0$ everywhere (remember that the weak form above has to hold for all discrete test functions $q,v$), then putting these choices of test functions into the weak formulation above implies in particular that

    +

    Before continuing, let us briefly pause and show that the choice of function spaces above provides us with the desired local conservation property. In particular, because the pressure space consists of discontinuous piecewise polynomials, we can choose the test function $q$ as the function that is equal to one on any given cell $K$ and zero everywhere else. If we also choose ${\mathbf v}=0$ everywhere (remember that the weak form above has to hold for all discrete test functions $q,v$), then putting these choices of test functions into the weak formulation above implies in particular that

    \begin{eqnarray*}
   - (1,{\textrm{div}}\ {\mathbf u}_h)_K
   =
@@ -218,16 +218,16 @@
 \end{eqnarray*}

    If you now recall that ${\mathbf u}$ was the velocity, then the integral on the left is exactly the (discrete) flux across the boundary of the cell $K$. The statement is then that the flux must be equal to the integral over the sources within $K$. In particular, if there are no sources (i.e., $f=0$ in $K$), then the statement is that total flux is zero, i.e., whatever flows into a cell must flow out of it through some other part of the cell boundary. This is what we call local conservation because it holds for every cell.

    -

    On the other hand, the usual continuous $Q_k$ elements would not result in this kind of property when used for the pressure (as, for example, we do in step-43) because one can not choose a discrete test function $q_h$ that is one on a cell $K$ and zero everywhere else: It would be discontinuous and consequently not in the finite element space. (Strictly speaking, all we can say is that the proof above would not work for continuous elements. Whether these elements might still result in local conservation is a different question as one could think that a different kind of proof might still work; in reality, however, the property really does not hold.)

    +

    On the other hand, the usual continuous $Q_k$ elements would not result in this kind of property when used for the pressure (as, for example, we do in step-43) because one can not choose a discrete test function $q_h$ that is one on a cell $K$ and zero everywhere else: It would be discontinuous and consequently not in the finite element space. (Strictly speaking, all we can say is that the proof above would not work for continuous elements. Whether these elements might still result in local conservation is a different question as one could think that a different kind of proof might still work; in reality, however, the property really does not hold.)

    Assembling the linear system

    -

    The deal.II library (of course) implements Raviart-Thomas elements $RT(k)$ of arbitrary order $k$, as well as discontinuous elements $DG(k)$. If we forget about their particular properties for a second, we then have to solve a discrete problem

    +

    The deal.II library (of course) implements Raviart-Thomas elements $RT(k)$ of arbitrary order $k$, as well as discontinuous elements $DG(k)$. If we forget about their particular properties for a second, we then have to solve a discrete problem

    \begin{eqnarray*}
   A(x_h,w_h) = F(w_h),
 \end{eqnarray*}

    -

    with the bilinear form and right hand side as stated above, and $x_h=\{{\mathbf u}_h,p_h\}$, $w_h=\{{\mathbf v}_h,q_h\}$. Both $x_h$ and $w_h$ are from the space $X_h=RT(k)\times DQ(k)$, where $RT(k)$ is itself a space of $dim$-dimensional functions to accommodate the fact that the flow velocity is vector-valued. The necessary question then is: how do we do this in a program?

    -

    Vector-valued elements have already been discussed in previous tutorial programs, the first time and in detail in step-8. The main difference there was that the vector-valued space $V_h$ is uniform in all its components: the $dim$ components of the displacement vector are all equal and from the same function space. What we could therefore do was to build $V_h$ as the outer product of the $dim$ times the usual $Q(1)$ finite element space, and by this make sure that all our shape functions have only a single non-zero vector component. Instead of dealing with vector-valued shape functions, all we did in step-8 was therefore to look at the (scalar) only non-zero component and use the fe.system_to_component_index(i).first call to figure out which component this actually is.

    -

    This doesn't work with Raviart-Thomas elements: following from their construction to satisfy certain regularity properties of the space $H({\textrm{div}})$, the shape functions of $RT(k)$ are usually nonzero in all their vector components at once. For this reason, were fe.system_to_component_index(i).first applied to determine the only nonzero component of shape function $i$, an exception would be generated. What we really need to do is to get at all vector components of a shape function. In deal.II diction, we call such finite elements non-primitive, whereas finite elements that are either scalar or for which every vector-valued shape function is nonzero only in a single vector component are called primitive.

    +

    with the bilinear form and right hand side as stated above, and $x_h=\{{\mathbf u}_h,p_h\}$, $w_h=\{{\mathbf v}_h,q_h\}$. Both $x_h$ and $w_h$ are from the space $X_h=RT(k)\times DQ(k)$, where $RT(k)$ is itself a space of $dim$-dimensional functions to accommodate the fact that the flow velocity is vector-valued. The necessary question then is: how do we do this in a program?

    +

    Vector-valued elements have already been discussed in previous tutorial programs, the first time and in detail in step-8. The main difference there was that the vector-valued space $V_h$ is uniform in all its components: the $dim$ components of the displacement vector are all equal and from the same function space. What we could therefore do was to build $V_h$ as the outer product of the $dim$ times the usual $Q(1)$ finite element space, and by this make sure that all our shape functions have only a single non-zero vector component. Instead of dealing with vector-valued shape functions, all we did in step-8 was therefore to look at the (scalar) only non-zero component and use the fe.system_to_component_index(i).first call to figure out which component this actually is.

    +

    This doesn't work with Raviart-Thomas elements: following from their construction to satisfy certain regularity properties of the space $H({\textrm{div}})$, the shape functions of $RT(k)$ are usually nonzero in all their vector components at once. For this reason, were fe.system_to_component_index(i).first applied to determine the only nonzero component of shape function $i$, an exception would be generated. What we really need to do is to get at all vector components of a shape function. In deal.II diction, we call such finite elements non-primitive, whereas finite elements that are either scalar or for which every vector-valued shape function is nonzero only in a single vector component are called primitive.

    So what do we have to do for non-primitive elements? To figure this out, let us go back in the tutorial programs, almost to the very beginnings. There, we learned that we use the FEValues class to determine the values and gradients of shape functions at quadrature points. For example, we would call fe_values.shape_value(i,q_point) to obtain the value of the ith shape function on the quadrature point with number q_point. Later, in step-8 and other tutorial programs, we learned that this function call also works for vector-valued shape functions (of primitive finite elements), and that it returned the value of the only non-zero component of shape function i at quadrature point q_point.

    For non-primitive shape functions, this is clearly not going to work: there is no single non-zero vector component of shape function i, and the call to fe_values.shape_value(i,q_point) would consequently not make much sense. However, deal.II offers a second function call, fe_values.shape_value_component(i,q_point,comp) that returns the value of the compth vector component of shape function i at quadrature point q_point, where comp is an index between zero and the number of vector components of the present finite element; for example, the element we will use to describe velocities and pressures is going to have $dim+1$ components. It is worth noting that this function call can also be used for primitive shape functions: it will simply return zero for all components except one; for non-primitive shape functions, it will in general return a non-zero value for more than just one component.

    We could now attempt to rewrite the bilinear form above in terms of vector components. For example, in 2d, the first term could be rewritten like this (note that $u_0=x_0, u_1=x_1, p=x_2$):

    @@ -261,7 +261,7 @@

    fe_values.shape_value_component(j,q,1)
    ) *
    fe_values.JxW(q);
    -

    This is, at best, tedious, error prone, and not dimension independent. There are obvious ways to make things dimension independent, but in the end, the code is simply not pretty. What would be much nicer is if we could simply extract the ${\mathbf u}$ and $p$ components of a shape function $x_h^i$. In the program we do that in the following way:

    +

    This is, at best, tedious, error prone, and not dimension independent. There are obvious ways to make things dimension independent, but in the end, the code is simply not pretty. What would be much nicer is if we could simply extract the ${\mathbf u}$ and $p$ components of a shape function $x_h^i$. In the program we do that in the following way:

    const FEValuesExtractors::Vector velocities (0);
    const FEValuesExtractors::Scalar pressure (dim);
    @@ -283,7 +283,7 @@

    This is, in fact, not only the first term of the bilinear form, but the whole thing (sans boundary contributions).

    -

    What this piece of code does is, given an fe_values object, to extract the values of the first $dim$ components of shape function i at quadrature points q, that is the velocity components of that shape function. Put differently, if we write shape functions $x_h^i$ as the tuple $\{{\mathbf u}_h^i,p_h^i\}$, then the function returns the velocity part of this tuple. Note that the velocity is of course a dim-dimensional tensor, and that the function returns a corresponding object. Similarly, where we subscript with the pressure extractor, we extract the scalar pressure component. The whole mechanism is described in more detail in the Handling vector valued problems module.

    +

    What this piece of code does is, given an fe_values object, to extract the values of the first $dim$ components of shape function i at quadrature points q, that is the velocity components of that shape function. Put differently, if we write shape functions $x_h^i$ as the tuple $\{{\mathbf u}_h^i,p_h^i\}$, then the function returns the velocity part of this tuple. Note that the velocity is of course a dim-dimensional tensor, and that the function returns a corresponding object. Similarly, where we subscript with the pressure extractor, we extract the scalar pressure component. The whole mechanism is described in more detail in the Handling vector valued problems module.

    In practice, it turns out that we can do a bit better if we evaluate the shape functions, their gradients and divergences only once per outermost loop, and store the result, as this saves us a few otherwise repeated computations (it is possible to save even more repeated operations by calculating all relevant quantities in advance and then only inserting the results in the actual loop, see step-22 for a realization of that approach). The final result then looks like this, working in every space dimension:

    for (const auto &cell : dof_handler.active_cell_iterators())
    {
    @@ -339,14 +339,14 @@

    You will find the exact same code as above in the sources for the present program. We will therefore not comment much on it below.

    Linear solvers and preconditioners

    After assembling the linear system we are faced with the task of solving it. The problem here is that the matrix possesses two undesirable properties:

    At least it is symmetric, but the first issue above still means that the Conjugate Gradient method is not going to work since it is only applicable to problems in which the matrix is symmetric and positive definite. We would have to resort to other iterative solvers instead, such as MinRes, SymmLQ, or GMRES, that can deal with indefinite systems. However, then the next problem immediately surfaces: Due to the zero block, there are zeros on the diagonal and none of the usual, "simple" preconditioners (Jacobi, SSOR) will work as they require division by diagonal elements.

    For the matrix sizes we expect to run with this program, the by far simplest approach would be to just use a direct solver (in particular, the SparseDirectUMFPACK class that is bundled with deal.II). step-29 goes this route and shows that solving any linear system can be done in just 3 or 4 lines of code.

    But then, this is a tutorial: We teach how to do things. Consequently, in the following, we will introduce some techniques that can be used in cases like these. Namely, we will consider the linear system as not consisting of one large matrix and vectors, but we will want to decompose matrices into blocks that correspond to the individual operators that appear in the system. We note that the resulting solver is not optimal – there are much better ways to efficiently compute the system, for example those explained in the results section of step-22 or the one we use in step-43 for a problem similar to the current one. Here, our goal is simply to introduce new solution techniques and how they can be implemented in deal.II.

    Solving using the Schur complement

    -

    In view of the difficulties using standard solvers and preconditioners mentioned above, let us take another look at the matrix. If we sort our degrees of freedom so that all velocity come before all pressure variables, then we can subdivide the linear system $Ax=b$ into the following blocks:

    +

    In view of the difficulties using standard solvers and preconditioners mentioned above, let us take another look at the matrix. If we sort our degrees of freedom so that all velocity come before all pressure variables, then we can subdivide the linear system $Ax=b$ into the following blocks:

    \begin{eqnarray*}
   \left(\begin{array}{cc}
     M & B \\ B^T & 0
@@ -360,24 +360,24 @@
   \end{array}\right),
 \end{eqnarray*}

    -

    where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B^T$ corresponds to the negative divergence operator, and $B$ is its transpose and corresponds to the gradient.

    +

    where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B^T$ corresponds to the negative divergence operator, and $B$ is its transpose and corresponds to the gradient.

    By block elimination, we can then re-order this system in the following way (multiply the first row of the system by $B^TM^{-1}$ and then subtract the second row from it):

    \begin{eqnarray*}
   B^TM^{-1}B P &=& B^TM^{-1} F - G, \\
   MU &=& F - BP.
 \end{eqnarray*}

    -

    Here, the matrix $S=B^TM^{-1}B$ (called the Schur complement of $A$) is obviously symmetric and, owing to the positive definiteness of $M$ and the fact that $B$ has full column rank, $S$ is also positive definite.

    -

    Consequently, if we could compute $S$, we could apply the Conjugate Gradient method to it. However, computing $S$ is expensive because it requires us to compute the inverse of the (possibly large) matrix $M$; and $S$ is in fact also a full matrix because even though $M$ is sparse, its inverse $M^{-1}$ will generally be a dense matrix. On the other hand, the CG algorithm doesn't require us to actually have a representation of $S$: It is sufficient to form matrix-vector products with it. We can do so in steps, using the fact that matrix products are associative (i.e., we can set parentheses in such a way that the product is more convenient to compute): To compute $Sv=(B^TM^{-1}B)v=B^T(M^{-1}(Bv))$, we

      +

      Here, the matrix $S=B^TM^{-1}B$ (called the Schur complement of $A$) is obviously symmetric and, owing to the positive definiteness of $M$ and the fact that $B$ has full column rank, $S$ is also positive definite.

      +

      Consequently, if we could compute $S$, we could apply the Conjugate Gradient method to it. However, computing $S$ is expensive because it requires us to compute the inverse of the (possibly large) matrix $M$; and $S$ is in fact also a full matrix because even though $M$ is sparse, its inverse $M^{-1}$ will generally be a dense matrix. On the other hand, the CG algorithm doesn't require us to actually have a representation of $S$: It is sufficient to form matrix-vector products with it. We can do so in steps, using the fact that matrix products are associative (i.e., we can set parentheses in such a way that the product is more convenient to compute): To compute $Sv=(B^TM^{-1}B)v=B^T(M^{-1}(Bv))$, we

      1. compute $w = B v$;
      2. solve $My = w$ for $y=M^{-1}w$, using the CG method applied to the positive definite and symmetric mass matrix $M$;
      3. compute $z=B^Ty$ to obtain $z=Sv$.

      Note how we evaluate the expression $B^TM^{-1}Bv$ right to left to avoid matrix-matrix products; this way, all we have to do is evaluate matrix-vector products.

      -

      In the following, we will then have to come up with ways to represent the matrix $S$ so that it can be used in a Conjugate Gradient solver, as well as to define ways in which we can precondition the solution of the linear system involving $S$, and deal with solving linear systems with the matrix $M$ (the second step above).

      +

      In the following, we will then have to come up with ways to represent the matrix $S$ so that it can be used in a Conjugate Gradient solver, as well as to define ways in which we can precondition the solution of the linear system involving $S$, and deal with solving linear systems with the matrix $M$ (the second step above).

      Note
      The key point in this consideration is to recognize that to implement an iterative solver such as CG or GMRES, we never actually need the actual elements of a matrix! All that is required is that we can form matrix-vector products. The same is true for preconditioners. In deal.II we encode this requirement by only requiring that matrices and preconditioners given to solver classes have a vmult() member function that does the matrix-vector product. How a class chooses to implement this function is not important to the solver. Consequently, classes can implement it by, for example, doing a sequence of products and linear solves as discussed above.

      The LinearOperator framework in deal.II

      deal.II includes support for describing such linear operations in a very general way. This is done with the LinearOperator class that, like the MatrixType concept, defines a minimal interface for applying a linear operation to a vector:

      std::function<void(Range &, const Domain &)> vmult;
      @@ -404,7 +404,7 @@

      We now have a LinearOperator op_M_inv that we can use to construct more complicated operators such as the Schur complement $S$. Assuming that B is a reference to the upper right block constructing a LinearOperator op_S is a matter of two lines:

      const auto op_B = linear_operator(B);
      const auto op_S = transpose_operator(op_B) * op_M_inv * op_B;
      LinearOperator< Domain, Range, Payload > transpose_operator(const LinearOperator< Range, Domain, Payload > &op)
      -

      Here, the multiplication of three LinearOperator objects yields a composite object op_S whose vmult() function first applies $B$, then $M^{-1}$ (i.e. solving an equation with $M$), and finally $B^T$ to any given input vector. In that sense op_S.vmult() is similar to the following code:

      B.vmult (tmp1, src); // multiply with the top right block: B
      +

      Here, the multiplication of three LinearOperator objects yields a composite object op_S whose vmult() function first applies $B$, then $M^{-1}$ (i.e. solving an equation with $M$), and finally $B^T$ to any given input vector. In that sense op_S.vmult() is similar to the following code:

      B.vmult (tmp1, src); // multiply with the top right block: B
      solver_M(M, tmp2, tmp1, preconditioner_M); // multiply with M^-1
      B.Tvmult (dst, tmp2); // multiply with the bottom left block: B^T

      (tmp1 and tmp2 are two temporary vectors). The key point behind this approach is the fact that we never actually create an inner product of matrices. Instead, whenever we have to perform a matrix vector multiplication with op_S we simply run all individual vmult operations in above sequence.

      @@ -426,7 +426,7 @@
      Even though both approaches are exactly equivalent, the LinearOperator class has a big advantage over this manual approach. It provides so-called syntactic sugar: Mathematically, we think about $S$ as being the composite matrix $S=B^TM^{-1}B$ and the LinearOperator class allows you to write this out more or less verbatim,
      const auto op_M_inv = inverse_operator(op_M, solver_M, preconditioner_M);
      const auto op_S = transpose_operator(op_B) * op_M_inv * op_B;
      The manual approach on the other hand obscures this fact. -

      All that is left for us to do now is to form the right hand sides of the two equations defining $P$ and $U$, and then solve them with the Schur complement matrix and the mass matrix, respectively. For example the right hand side of the first equation reads $B^TM^{-1}F-G$. This could be implemented as follows:

      Vector<double> schur_rhs (P.size());
      +

      All that is left for us to do now is to form the right hand sides of the two equations defining $P$ and $U$, and then solve them with the Schur complement matrix and the mass matrix, respectively. For example the right hand side of the first equation reads $B^TM^{-1}F-G$. This could be implemented as follows:

      Vector<double> schur_rhs (P.size());
      Vector<double> tmp (U.size());
      op_M_inv.vmult (tmp, F);
      transpose_operator(op_B).vmult (schur_rhs, tmp);
      @@ -435,7 +435,7 @@
      std::function<void(Range &)> apply_add;

      The class allows lazy evaluation of expressions involving vectors and linear operators. This is done by storing the computational expression and only performing the computation when either the object is converted to a vector object, or PackagedOperation::apply() (or PackagedOperation::apply_add()) is invoked by hand. Assuming that F and G are the two vectors of the right hand side we can simply write:

      const auto schur_rhs = transpose_operator(op_B) * op_M_inv * F - G;

      Here, schur_rhs is a PackagedOperation that records the computation we specified. It does not create a vector with the actual result immediately.

      -

      With these prerequisites at hand, solving for $P$ and $U$ is a matter of creating another solver and inverse:

      SolverControl solver_control_S(2000, 1.e-12);
      +

      With these prerequisites at hand, solving for $P$ and $U$ is a matter of creating another solver and inverse:

      SolverControl solver_control_S(2000, 1.e-12);
      SolverCG<Vector<double>> solver_S(solver_control_S);
      PreconditionIdentity preconditioner_S;
      @@ -455,9 +455,9 @@ \end{eqnarray*}" src="form_3139.png"/>

      as a preconditioner? That would mean that every time we have to do one preconditioning step, we actually have to solve with $\tilde S$. At first, this looks almost as expensive as solving with $S$ right away. However, note that in the inner iteration, we do not have to calculate $M^{-1}$, but only the inverse of its diagonal, which is cheap.

      -

      Thankfully, the LinearOperator framework makes this very easy to write out. We already used a Jacobi preconditioner (preconditioner_M) for the $M$ matrix earlier. So all that is left to do is to write out how the approximate Schur complement should look like:

      const auto op_aS =
      +

      Thankfully, the LinearOperator framework makes this very easy to write out. We already used a Jacobi preconditioner (preconditioner_M) for the $M$ matrix earlier. So all that is left to do is to write out how the approximate Schur complement should look like:

      const auto op_aS =
      transpose_operator(op_B) * linear_operator(preconditioner_M) * op_B;
      -

      Note how this operator differs in simply doing one Jacobi sweep (i.e. multiplying with the inverses of the diagonal) instead of multiplying with the full $M^{-1}$. (This is how a single Jacobi preconditioner step with $M$ is defined: it is the multiplication with the inverse of the diagonal of $M$; in other words, the operation $({\textrm{diag}\ }M)^{-1}x$ on a vector $x$ is exactly what PreconditionJacobi does.)

      +

      Note how this operator differs in simply doing one Jacobi sweep (i.e. multiplying with the inverses of the diagonal) instead of multiplying with the full $M^{-1}$. (This is how a single Jacobi preconditioner step with $M$ is defined: it is the multiplication with the inverse of the diagonal of $M$; in other words, the operation $({\textrm{diag}\ }M)^{-1}x$ on a vector $x$ is exactly what PreconditionJacobi does.)

      With all this we almost have the preconditioner completed: it should be the inverse of the approximate Schur complement. We implement this again by creating a linear operator with inverse_operator() function. This time however we would like to choose a relatively modest tolerance for the CG solver (that inverts op_aS). The reasoning is that op_aS is only a coarse approximation of op_S, so we actually do not need to invert it exactly. This, however, creates a subtle problem: preconditioner_S will be used in the final outer CG iteration to create an orthogonal basis. But for this to work, it must be precisely the same linear operation for every invocation. We ensure this by using an IterationNumberControl that allows us to fix the number of CG iterations that are performed to a fixed small number (in our case 30):

      IterationNumberControl iteration_number_control_aS(30, 1.e-18);
      SolverCG<Vector<double>> solver_aS(iteration_number_control_aS);
      PreconditionIdentity preconditioner_aS;
      @@ -721,7 +721,7 @@
       
      void component_wise(DoFHandler< dim, spacedim > &dof_handler, const std::vector< unsigned int > &target_component=std::vector< unsigned int >())

      The next thing is that we want to figure out the sizes of these blocks so that we can allocate an appropriate amount of space. To this end, we call the DoFTools::count_dofs_per_fe_component() function that counts how many shape functions are non-zero for a particular vector component. We have dim+1 vector components, and DoFTools::count_dofs_per_fe_component() will count how many shape functions belong to each of these components.

      -

      There is one problem here. As described in the documentation of that function, it wants to put the number of $x$-velocity shape functions into dofs_per_component[0], the number of $y$-velocity shape functions into dofs_per_component[1] (and similar in 3d), and the number of pressure shape functions into dofs_per_component[dim]. But, the Raviart-Thomas element is special in that it is non-primitive, i.e., for Raviart-Thomas elements all velocity shape functions are nonzero in all components. In other words, the function cannot distinguish between $x$ and $y$ velocity functions because there is no such distinction. It therefore puts the overall number of velocity into each of dofs_per_component[c], $0\le c\le \text{dim}$. On the other hand, the number of pressure variables equals the number of shape functions that are nonzero in the dim-th component.

      +

      There is one problem here. As described in the documentation of that function, it wants to put the number of $x$-velocity shape functions into dofs_per_component[0], the number of $y$-velocity shape functions into dofs_per_component[1] (and similar in 3d), and the number of pressure shape functions into dofs_per_component[dim]. But, the Raviart-Thomas element is special in that it is non-primitive, i.e., for Raviart-Thomas elements all velocity shape functions are nonzero in all components. In other words, the function cannot distinguish between $x$ and $y$ velocity functions because there is no such distinction. It therefore puts the overall number of velocity into each of dofs_per_component[c], $0\le c\le \text{dim}$. On the other hand, the number of pressure variables equals the number of shape functions that are nonzero in the dim-th component.

      Using this knowledge, we can get the number of velocity shape functions from any of the first dim elements of dofs_per_component, and then use this below to initialize the vector and matrix block sizes, as well as create output.

      Note
      If you find this concept difficult to understand, you may want to consider using the function DoFTools::count_dofs_per_fe_block() instead, as we do in the corresponding piece of code in step-22. You might also want to read up on the difference between blocks and components in the glossary.
        const std::vector<types::global_dof_index> dofs_per_component =
      @@ -1072,7 +1072,7 @@
        }

      Results

      Output of the program and graphical visualization

      -

      If we run the program as is, we get this output for the $32\times 32$ mesh we use (for a total of 1024 cells with 1024 pressure degrees of freedom since we use piecewise constants, and 2112 velocities because the Raviart-Thomas element defines one degree of freedom per face and there are $1024 + 32 = 1056$ faces parallel to the $x$-axis and the same number parallel to the $y$-axis):

      \$ make run
      +

      If we run the program as is, we get this output for the $32\times 32$ mesh we use (for a total of 1024 cells with 1024 pressure degrees of freedom since we use piecewise constants, and 2112 velocities because the Raviart-Thomas element defines one degree of freedom per face and there are $1024 + 32 = 1056$ faces parallel to the $x$-axis and the same number parallel to the $y$-axis):

      \$ make run
       [ 66%] Built target step-20
       Scanning dependencies of target run
       [100%] Run step-20 with Release configuration
      @@ -1088,7 +1088,7 @@
           
       
       

      Let us start with the pressure: it is highest at the left and lowest at the right, so flow will be from left to right. In addition, though hardly visible in the graph, we have chosen the pressure field such that the left-right flow first channels towards the center and then outward again. Consequently, the x-velocity has to increase to get the flow through the narrow part, something that can easily be seen in the left image. The middle image represents inward flow in y-direction at the left end of the domain, and outward flow in y-direction at the right end of the domain.

      -

      As an additional remark, note how the x-velocity in the left image is only continuous in x-direction, whereas the y-velocity is continuous in y-direction. The flow fields are discontinuous in the other directions. This very obviously reflects the continuity properties of the Raviart-Thomas elements, which are, in fact, only in the space H(div) and not in the space $H^1$. Finally, the pressure field is completely discontinuous, but that should not surprise given that we have chosen FE_DGQ(0) as the finite element for that solution component.

      +

      As an additional remark, note how the x-velocity in the left image is only continuous in x-direction, whereas the y-velocity is continuous in y-direction. The flow fields are discontinuous in the other directions. This very obviously reflects the continuity properties of the Raviart-Thomas elements, which are, in fact, only in the space H(div) and not in the space $H^1$. Finally, the pressure field is completely discontinuous, but that should not surprise given that we have chosen FE_DGQ(0) as the finite element for that solution component.

      Convergence

      The program offers two obvious places where playing and observing convergence is in order: the degree of the finite elements used (passed to the constructor of the MixedLaplaceProblem class from main()), and the refinement level (determined in MixedLaplaceProblem::make_grid_and_dofs). What one can do is to change these values and observe the errors computed later on in the course of the program run.

      If one does this, one finds the following pattern for the $L_2$ error in the pressure variable:

      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_21.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (3138)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_21.html 2023-11-25 15:26:01.486524277 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_21.html 2023-11-25 15:26:01.486524277 +0100 @@ -153,7 +153,7 @@

      The equations covered here are an extension of the material already covered in step-20. In particular, they fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems module.

      The two phase flow problem

      Modeling of two phase flow in porous media is important for both environmental remediation and the management of petroleum and groundwater reservoirs. Practical situations involving two phase flow include the dispersal of a nonaqueous phase liquid in an aquifer, or the joint movement of a mixture of fluids such as oil and water in a reservoir. Simulation models, if they are to provide realistic predictions, must accurately account for these effects.

      -

      To derive the governing equations, consider two phase flow in a reservoir $\Omega$ under the assumption that the movement of fluids is dominated by viscous effects; i.e. we neglect the effects of gravity, compressibility, and capillary pressure. Porosity will be considered to be constant. We will denote variables referring to either of the two phases using subscripts $w$ and $o$, short for water and oil. The derivation of the equations holds for other pairs of fluids as well, however.

      +

      To derive the governing equations, consider two phase flow in a reservoir $\Omega$ under the assumption that the movement of fluids is dominated by viscous effects; i.e. we neglect the effects of gravity, compressibility, and capillary pressure. Porosity will be considered to be constant. We will denote variables referring to either of the two phases using subscripts $w$ and $o$, short for water and oil. The derivation of the equations holds for other pairs of fluids as well, however.

      The velocity with which molecules of each of the two phases move is determined by Darcy's law that states that the velocity is proportional to the pressure gradient:

      \begin{eqnarray*}
   \mathbf{u}_{j}
@@ -161,7 +161,7 @@
   -\frac{k_{rj}(S)}{\mu_{j}} \mathbf{K} \cdot \nabla p
 \end{eqnarray*}

      -

      where $\mathbf{u}_{j}$ is the velocity of phase $j=o,w$, $K$ is the permeability tensor, $k_{rj}$ is the relative permeability of phase $j$, $p$ is the pressure and $\mu_{j}$ is the viscosity of phase $j$. Finally, $S$ is the saturation (volume fraction), i.e. a function with values between 0 and 1 indicating the composition of the mixture of fluids. In general, the coefficients $K, k_{rj}, \mu$ may be spatially dependent variables, and we will always treat them as non-constant functions in the following.

      +

      where $\mathbf{u}_{j}$ is the velocity of phase $j=o,w$, $K$ is the permeability tensor, $k_{rj}$ is the relative permeability of phase $j$, $p$ is the pressure and $\mu_{j}$ is the viscosity of phase $j$. Finally, $S$ is the saturation (volume fraction), i.e. a function with values between 0 and 1 indicating the composition of the mixture of fluids. In general, the coefficients $K, k_{rj}, \mu$ may be spatially dependent variables, and we will always treat them as non-constant functions in the following.

      We combine Darcy's law with the statement of conservation of mass for each phase,

      \[
   \textrm{div}\ \mathbf{u}_{j} = q_j,
@@ -172,7 +172,7 @@
 - \nabla \cdot (\mathbf{K}\lambda(S) \nabla p)= q.
 \end{eqnarray*}

      -

      Here, $q$ is the sum source term, and

      +

      Here, $q$ is the sum source term, and

      \[
   \lambda(S) = \frac{k_{rw}(S)}{\mu_{w}}+\frac{k_{ro}(S)}{\mu_{o}}
 \] @@ -248,7 +248,7 @@

      where $\triangle t$ is the length of a time step. Note how we solve the implicit pressure-velocity system that only depends on the previously computed saturation $S^n$, and then do an explicit time step for $S^{n+1}$ that only depends on the previously known $S^n$ and the just computed $\mathbf{u}^{n+1}$. This way, we never have to iterate for the nonlinearities of the system as we would have if we used a fully implicit method. (In a more modern perspective, this should be seen as an "operator splitting" method. step-58 has a long description of the idea behind this.)

      -

      We can then state the problem in weak form as follows, by multiplying each equation with test functions $\mathbf v$, $\phi$, and $\sigma$ and integrating terms by parts:

      +

      We can then state the problem in weak form as follows, by multiplying each equation with test functions $\mathbf v$, $\phi$, and $\sigma$ and integrating terms by parts:

      \begin{eqnarray*}
   \left((\mathbf{K}\lambda(S^n))^{-1} \mathbf{u}^{n+1},\mathbf v\right)_\Omega -
   (p^{n+1}, \nabla\cdot\mathbf v)_\Omega &=&
@@ -257,7 +257,7 @@
   (\nabla \cdot\mathbf{u}^{n+1}, \phi)_\Omega &=& (q^{n+1},\phi)_\Omega
 \end{eqnarray*}

      -

      Note that in the first term, we have to prescribe the pressure $p^{n+1}$ on the boundary $\partial\Omega$ as boundary values for our problem. $\mathbf n$ denotes the unit outward normal vector to $\partial K$, as usual.

      +

      Note that in the first term, we have to prescribe the pressure $p^{n+1}$ on the boundary $\partial\Omega$ as boundary values for our problem. $\mathbf n$ denotes the unit outward normal vector to $\partial K$, as usual.

      For the saturation equation, we obtain after integrating by parts

      \begin{eqnarray*}
   (S^{n+1}, \sigma)_\Omega
@@ -291,7 +291,7 @@
 </p>
 <p> We introduce an object of type <a class=DiscreteTime in order to keep track of the current value of time and time step in the code. This class encapsulates many complexities regarding adjusting time step size and stopping at a specified final time.

      Space discretization

      -

      In each time step, we then apply the mixed finite method of step-20 to the velocity and pressure. To be well-posed, we choose Raviart-Thomas spaces $RT_{k}$ for $\mathbf{u}$ and discontinuous elements of class $DGQ_{k}$ for $p$. For the saturation, we will also choose $DGQ_{k}$ spaces.

      +

      In each time step, we then apply the mixed finite method of step-20 to the velocity and pressure. To be well-posed, we choose Raviart-Thomas spaces $RT_{k}$ for $\mathbf{u}$ and discontinuous elements of class $DGQ_{k}$ for $p$. For the saturation, we will also choose $DGQ_{k}$ spaces.

      Since we have discontinuous spaces, we have to think about how to evaluate terms on the interfaces between cells, since discontinuous functions are not really defined there. In particular, we have to give a meaning to the last term on the left hand side of the saturation equation. To this end, let us define that we want to evaluate it in the following sense:

      \begin{eqnarray*}
   &&\left(F(S^n) (\mathbf n \cdot \mathbf{u}^{n+1}), \sigma\right)_{\partial K}
@@ -305,7 +305,7 @@
 <p> where <picture><source srcset=$\partial K_{-} \dealcoloneq \{x\in \partial K, \mathbf{u}(x) \cdot \mathbf{n}<0\}$ denotes the inflow boundary and $\partial K_{+} \dealcoloneq \{\partial K \setminus
 \partial K_{-}\}$ is the outflow part of the boundary. The quantities $S_+,\mathbf{u}_+$ then correspond to the values of these variables on the present cell, whereas $S_-,\mathbf{u}_-$ (needed on the inflow part of the boundary of $K$) are quantities taken from the neighboring cell. Some more context on discontinuous element techniques and evaluation of fluxes can also be found in step-12 and step-12b.

      Linear solvers

      -

      The linear solvers used in this program are a straightforward extension of the ones used in step-20 (but without LinearOperator). Essentially, we simply have to extend everything from two to three solution components. If we use the discrete spaces mentioned above and put shape functions into the bilinear forms, we arrive at the following linear system to be solved for time step $n+1$:

      +

      The linear solvers used in this program are a straightforward extension of the ones used in step-20 (but without LinearOperator). Essentially, we simply have to extend everything from two to three solution components. If we use the discrete spaces mentioned above and put shape functions into the bilinear forms, we arrive at the following linear system to be solved for time step $n+1$:

      \[
 \left(
 \begin{array}{ccc}
@@ -327,7 +327,7 @@
 \right)
 \]

      -

      where the individual matrices and vectors are defined as follows using shape functions $\mathbf v_i$ (of type Raviart Thomas $RT_k$) for velocities and $\phi_i$ (of type $DGQ_k$) for both pressures and saturations:

      +

      where the individual matrices and vectors are defined as follows using shape functions $\mathbf v_i$ (of type Raviart Thomas $RT_k$) for velocities and $\phi_i$ (of type $DGQ_k$) for both pressures and saturations:

      \begin{eqnarray*}
 M^u(S^n)_{ij} &=&
 \left((\mathbf{K}\lambda(S^n))^{-1} \mathbf{v}_i,\mathbf
@@ -357,7 +357,7 @@
 (S^n,\phi_i)_\Omega +\triangle t \sum_K  \left(F(S^n) q^{n+1}, \phi_i\right)_K.
 \end{eqnarray*}

      -
      Note
      Due to historical accidents, the role of matrices $B$ and $B^T$ has been reverted in this program compared to step-20. In other words, here $B$ refers to the divergence and $B^T$ to the gradient operators when it was the other way around in step-20.
      +
      Note
      Due to historical accidents, the role of matrices $B$ and $B^T$ has been reverted in this program compared to step-20. In other words, here $B$ refers to the divergence and $B^T$ to the gradient operators when it was the other way around in step-20.

      The system above presents a complication: Since the matrix $H_{ij}$ depends on $\mathbf u^{n+1}$ implicitly (the velocities are needed to determine which parts of the boundaries $\partial K$ of cells are influx or outflux parts), we can only assemble this matrix after we have solved for the velocities.

      The solution scheme then involves the following steps:

      1. @@ -375,7 +375,7 @@
      2. Solve for the saturation $S^{n+1}$.
      -

      In this scheme, we never actually build the matrix $H$, but rather generate the right hand side of the third equation once we are ready to do so.

      +

      In this scheme, we never actually build the matrix $H$, but rather generate the right hand side of the third equation once we are ready to do so.

      In the program, we use a variable solution to store the solution of the present time step. At the end of each step, we copy its content, i.e. all three of its block components, into the variable old_solution for use in the next time step.

      Choosing a time step

      A general rule of thumb in hyperbolic transport equations like the equation we have to solve for the saturation equation is that if we use an explicit time stepping scheme, then we should use a time step such that the distance that a particle can travel within one time step is no larger than the diameter of a single cell. In other words, here, we should choose

      @@ -424,7 +424,7 @@ \]" src="form_3224.png"/>

      Note
      Coming back to this testcase in step-43 several years later revealed an oddity in the setup of this testcase. To this end, consider that we can rewrite the advection equation for the saturation as $S_{t} + (\mathbf{u}
-F'(S)) \cdot \nabla S = 0$. Now, at the initial time, we have $S=0$, and with the given choice of function $F(S)$, we happen to have $F'(0)=0$. In other words, at $t=0$, the equation reduces to $S_t=0$ for all $\mathbf x$, so the saturation is zero everywhere and it is going to stay zero everywhere! This is despite the fact that $\mathbf u$ is not necessarily zero: the combined fluid is moving, but we've chosen our partial flux $F(S)$ in such a way that infinitesimal amounts of wetting fluid also only move at infinitesimal speeds (i.e., they stick to the medium more than the non-wetting phase in which they are embedded). That said, how can we square this with the knowledge that wetting fluid is invading from the left, leading to the flow patterns seen in the results section? That's where we get into mathematics: Equations like the transport equation we are considering here have infinitely many solutions, but only one of them is physical: the one that results from the so-called viscosity limit, called the viscosity solution. The thing is that with discontinuous elements we arrive at this viscosity limit because using a numerical flux introduces a finite amount of artificial viscosity into the numerical scheme. On the other hand, in step-43, we use an artificial viscosity that is proportional to $\|\mathbf u F'(S)\|$ on every cell, which at the initial time is zero. Thus, the saturation there is zero and remains zero; the solution we then get is one solution of the advection equation, but the method does not converge to the viscosity solution without further changes. We will therefore use a different initial condition in that program.
      +F'(S)) \cdot \nabla S = 0$" src="form_3225.png"/>. Now, at the initial time, we have $S=0$, and with the given choice of function $F(S)$, we happen to have $F'(0)=0$. In other words, at $t=0$, the equation reduces to $S_t=0$ for all $\mathbf x$, so the saturation is zero everywhere and it is going to stay zero everywhere! This is despite the fact that $\mathbf u$ is not necessarily zero: the combined fluid is moving, but we've chosen our partial flux $F(S)$ in such a way that infinitesimal amounts of wetting fluid also only move at infinitesimal speeds (i.e., they stick to the medium more than the non-wetting phase in which they are embedded). That said, how can we square this with the knowledge that wetting fluid is invading from the left, leading to the flow patterns seen in the results section? That's where we get into mathematics: Equations like the transport equation we are considering here have infinitely many solutions, but only one of them is physical: the one that results from the so-called viscosity limit, called the viscosity solution. The thing is that with discontinuous elements we arrive at this viscosity limit because using a numerical flux introduces a finite amount of artificial viscosity into the numerical scheme. On the other hand, in step-43, we use an artificial viscosity that is proportional to $\|\mathbf u F'(S)\|$ on every cell, which at the initial time is zero. Thus, the saturation there is zero and remains zero; the solution we then get is one solution of the advection equation, but the method does not converge to the viscosity solution without further changes. We will therefore use a different initial condition in that program.

      Finally, to come back to the description of the testcase, we will show results for computations with the two permeability functions introduced at the end of the results section of step-20:

      • A function that models a single, winding crack that snakes through the domain. In analogy to step-20, but taking care of the slightly different geometry we have here, we describe this by the following function:

        @@ -449,7 +449,7 @@ e^{-\left(\frac{|\mathbf{x}-\mathbf{x}_i|}{0.05}\right)^2}, \end{eqnarray*}" src="form_3231.png"/>

        - where the centers $\mathbf{x}_i$ are $N$ randomly chosen locations inside the domain. This function models a domain in which there are $N$ centers of higher permeability (for example where rock has cracked) embedded in a matrix of more pristine, unperturbed background rock. Note that here we have cut off the permeability function both above and below to ensure a bounded condition number.
      • + where the centers $\mathbf{x}_i$ are $N$ randomly chosen locations inside the domain. This function models a domain in which there are $N$ centers of higher permeability (for example where rock has cracked) embedded in a matrix of more pristine, unperturbed background rock. Note that here we have cut off the permeability function both above and below to ensure a bounded condition number.

      The commented program

      This program is an adaptation of step-20 and includes some technique of DG methods from step-12. A good part of the program is therefore very similar to step-20 and we will not comment again on these parts. Only the new stuff will be discussed in more detail.

      @@ -511,7 +511,7 @@
    1. project_back_saturation resets all saturation degrees of freedom with values less than zero to zero, and all those with saturations greater than one to one.
    2. -

      The rest of the class should be pretty much obvious. The viscosity variable stores the viscosity $\mu$ that enters several of the formulas in the nonlinear equations. The variable time keeps track of the time information within the simulation.

      +

      The rest of the class should be pretty much obvious. The viscosity variable stores the viscosity $\mu$ that enters several of the formulas in the nonlinear equations. The variable time keeps track of the time information within the simulation.

        template <int dim>
        class TwoPhaseFlowProblem
        {
      @@ -1124,7 +1124,7 @@
        fe_values.get_function_values(solution, present_solution_values);
       

      First for the cell terms. These are, following the formulas in the introduction, $(S^n,\sigma)-(F(S^n) \mathbf{v}^{n+1},\nabla
-   \sigma)$, where $\sigma$ is the saturation component of the test function:

      + \sigma)$" src="form_3235.png"/>, where $\sigma$ is the saturation component of the test function:

        for (unsigned int q = 0; q < n_q_points; ++q)
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
        {
      @@ -1490,7 +1490,7 @@

      Possibilities for extensions

      There are a number of areas where this program can be improved. Three of them are listed below. All of them are, in fact, addressed in a tutorial program that forms the continuation of the current one: step-43.

      Solvers

      -

      At present, the program is not particularly fast: the 2d random medium computation took about a day for the 1,000 or so time steps. The corresponding 3d computation took almost two days for 800 time steps. The reason why it isn't faster than this is twofold. First, we rebuild the entire matrix in every time step, although some parts such as the $B$, $B^T$, and $M^S$ blocks never change.

      +

      At present, the program is not particularly fast: the 2d random medium computation took about a day for the 1,000 or so time steps. The corresponding 3d computation took almost two days for 800 time steps. The reason why it isn't faster than this is twofold. First, we rebuild the entire matrix in every time step, although some parts such as the $B$, $B^T$, and $M^S$ blocks never change.

      Second, we could do a lot better with the solver and preconditioners. Presently, we solve the Schur complement $B^TM^u(S)^{-1}B$ with a CG method, using $[B^T (\textrm{diag}(M^u(S)))^{-1} B]^{-1}$ as a preconditioner. Applying this preconditioner is expensive, since it involves solving a linear system each time. This may have been appropriate for step-20, where we have to solve the entire problem only once. However, here we have to solve it hundreds of times, and in such cases it is worth considering a preconditioner that is more expensive to set up the first time, but cheaper to apply later on.

      One possibility would be to realize that the matrix we use as preconditioner, $B^T (\textrm{diag}(M^u(S)))^{-1} B$ is still sparse, and symmetric on top of that. If one looks at the flow field evolve over time, we also see that while $S$ changes significantly over time, the pressure hardly does and consequently $B^T (\textrm{diag}(M^u(S)))^{-1} B \approx B^T (\textrm{diag}(M^u(S^0)))^{-1}
 B$. In other words, the matrix for the first time step should be a good preconditioner also for all later time steps. With a bit of back-and-forthing, it isn't hard to actually get a representation of it as a SparseMatrix object. We could then hand it off to the SparseMIC class to form a sparse incomplete Cholesky decomposition. To form this decomposition is expensive, but we have to do it only once in the first time step, and can then use it as a cheap preconditioner in the future. We could do better even by using the SparseDirectUMFPACK class that produces not only an incomplete, but a complete decomposition of the matrix, which should yield an even better preconditioner.

      @@ -1503,7 +1503,7 @@ \triangle t_{n+1} \le \frac h{|\mathbf{u}^{n+1}(\mathbf{x})|} \]" src="form_3248.png"/>

      -

      that has to hold globally, i.e. for all $\mathbf x$. After discretization, we satisfy it by choosing

      +

      that has to hold globally, i.e. for all $\mathbf x$. After discretization, we satisfy it by choosing

      \[
   \triangle t_{n+1} = \frac {\min_K h_K}{\max_{\mathbf{x}}|\mathbf{u}^{n+1}(\mathbf{x})|}.
 \] /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_22.html differs (JavaScript source, ASCII text, with very long lines (2478)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_22.html 2023-11-25 15:26:01.523190198 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_22.html 2023-11-25 15:26:01.523190198 +0100 @@ -165,36 +165,36 @@ This material is based upon work partly supported by the National Science Foundation under Award No. EAR-0426271 and The California Institute of Technology. Any opinions, findings, and conclusions or recommendations expressed in this publication are those of the author and do not necessarily reflect the views of the National Science Foundation or of The California Institute of Technology.

      Introduction

      This program deals with the Stokes system of equations which reads as follows in non-dimensionalized form:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p &=& \textbf{f},
   \\
   -\textrm{div}\; \textbf{u} &=& 0,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3249.png"/>

      -

      where $\textbf u$ denotes the velocity of a fluid, $p$ is its pressure, $\textbf f$ are external forces, and $\varepsilon(\textbf{u})= \nabla^s{\textbf{u}}= \frac 12 \left[
-(\nabla \textbf{u}) + (\nabla \textbf{u})^T\right]$ is the rank-2 tensor of symmetrized gradients; a component-wise definition of it is $\varepsilon(\textbf{u})_{ij}=\frac
-12\left(\frac{\partial u_i}{\partial x_j} + \frac{\partial u_j}{\partial x_i}\right)$.

      +

      where $\textbf u$ denotes the velocity of a fluid, $p$ is its pressure, $\textbf f$ are external forces, and $\varepsilon(\textbf{u})= \nabla^s{\textbf{u}}= \frac 12 \left[
+(\nabla \textbf{u}) + (\nabla \textbf{u})^T\right]$ is the rank-2 tensor of symmetrized gradients; a component-wise definition of it is $\varepsilon(\textbf{u})_{ij}=\frac
+12\left(\frac{\partial u_i}{\partial x_j} + \frac{\partial u_j}{\partial x_i}\right)$.

      The Stokes equations describe the steady-state motion of a slow-moving, viscous fluid such as honey, rocks in the earth mantle, or other cases where inertia does not play a significant role. If a fluid is moving fast enough that inertia forces are significant compared to viscous friction, the Stokes equations are no longer valid; taking into account inertia effects then leads to the nonlinear Navier-Stokes equations. However, in this tutorial program, we will focus on the simpler Stokes system.

      Note that when deriving the more general compressible Navier-Stokes equations, the diffusion is modeled as the divergence of the stress tensor

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \tau = - \mu \left(2\varepsilon(\textbf{u}) - \frac{2}{3}\nabla \cdot \textbf{u} I\right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_3253.png"/>

      -

      where $\mu$ is the viscosity of the fluid. With the assumption of $\mu=1$ (assume constant viscosity and non-dimensionalize the equation by dividing out $\mu$) and assuming incompressibility ( $\textrm{div}\; \textbf{u}=0$), we arrive at the formulation from above:

      -\begin{eqnarray*}
+<p> where <picture><source srcset=$\mu$ is the viscosity of the fluid. With the assumption of $\mu=1$ (assume constant viscosity and non-dimensionalize the equation by dividing out $\mu$) and assuming incompressibility ( $\textrm{div}\; \textbf{u}=0$), we arrive at the formulation from above:

      +\begin{eqnarray*}
   \textrm{div}\; \tau = -2\textrm{div}\;\varepsilon(\textbf{u}).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3256.png"/>

      -

      A different formulation uses the Laplace operator ( $-\triangle \textbf{u}$) instead of the symmetrized gradient. A big difference here is that the different components of the velocity do not couple. If you assume additional regularity of the solution $\textbf{u}$ (second partial derivatives exist and are continuous), the formulations are equivalent:

      -\begin{eqnarray*}
+<p> A different formulation uses the Laplace operator ( <picture><source srcset=$-\triangle \textbf{u}$) instead of the symmetrized gradient. A big difference here is that the different components of the velocity do not couple. If you assume additional regularity of the solution $\textbf{u}$ (second partial derivatives exist and are continuous), the formulations are equivalent:

      +\begin{eqnarray*}
   \textrm{div}\; \tau
   = -2\textrm{div}\;\varepsilon(\textbf{u})
   = -\triangle \textbf{u} - \nabla \cdot (\nabla\textbf{u})^T
   = -\triangle \textbf{u}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3259.png"/>

      -

      This is because the $i$th entry of $\nabla \cdot (\nabla\textbf{u})^T$ is given by:

      -\begin{eqnarray*}
+<p> This is because the <picture><source srcset=$i$th entry of $\nabla \cdot (\nabla\textbf{u})^T$ is given by:

      +\begin{eqnarray*}
 [\nabla \cdot (\nabla\textbf{u})^T]_i
 = \sum_j \frac{\partial}{\partial x_j} [(\nabla\textbf{u})^T]_{i,j}
 = \sum_j \frac{\partial}{\partial x_j} [(\nabla\textbf{u})]_{j,i}
@@ -203,14 +203,14 @@
 = \frac{\partial}{\partial x_i}
   \underbrace{\textrm{div}\; \textbf{u}}_{=0}
 = 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3261.png"/>

      If you can not assume the above mentioned regularity, or if your viscosity is not a constant, the equivalence no longer holds. Therefore, we decided to stick with the more physically accurate symmetric tensor formulation in this tutorial.

      To be well-posed, we will have to add boundary conditions to the equations. What boundary conditions are readily possible here will become clear once we discuss the weak form of the equations.

      The equations covered here fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems module.

      Weak form

      The weak form of the equations is obtained by writing it in vector form as

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \begin{pmatrix}
     {-2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p}
     \\
@@ -222,23 +222,23 @@
   \\
   0
   \end{pmatrix},
-\end{eqnarray*} +\end{eqnarray*}" src="form_3262.png"/>

      -

      forming the dot product from the left with a vector-valued test function $\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$ and integrating over the domain $\Omega$, yielding the following set of equations:

      -\begin{eqnarray*}
+<p> forming the dot product from the left with a vector-valued test function <picture><source srcset=$\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$ and integrating over the domain $\Omega$, yielding the following set of equations:

      +\begin{eqnarray*}
   (\mathrm v,
    -2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p)_{\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3264.png"/>

      -

      which has to hold for all test functions $\phi = \begin{pmatrix}\textbf{v}
-\\ q\end{pmatrix}$.

      +

      which has to hold for all test functions $\phi = \begin{pmatrix}\textbf{v}
+\\ q\end{pmatrix}$.

      A generally good rule of thumb is that if one can reduce how many derivatives are taken on any variable in the formulation, then one should in fact do that using integration by parts. (This is motivated by the theory of partial differential equations, and in particular the difference between strong and weak solutions.) We have already done that for the Laplace equation, where we have integrated the second derivative by parts to obtain the weak formulation that has only one derivative on both test and trial function.

      In the current context, we integrate by parts the second term:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (\textbf{v}, -2\; \textrm{div}\; \varepsilon(\textbf{u}))_{\Omega}
   - (\textrm{div}\; \textbf{v}, p)_{\Omega}
   + (\textbf{n}\cdot\textbf{v}, p)_{\partial\Omega}
@@ -246,10 +246,10 @@
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3266.png"/>

      Likewise, we integrate by parts the first term to obtain

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (\nabla \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Omega}
   -
   (\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\partial\Omega}
@@ -259,19 +259,19 @@
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3267.png"/>

      where the scalar product between two tensor-valued quantities is here defined as

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (\nabla \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Omega}
   =
   2 \int_\Omega \sum_{i,j=1}^d \frac{\partial v_j}{\partial x_i}
   \varepsilon(\textbf{u})_{ij} \ dx.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3268.png"/>

      -

      Using this, we have now reduced the requirements on our variables to first derivatives for $\mathbf u,\mathbf v$ and no derivatives at all for $p,q$.

      -

      Because the scalar product between a general tensor like $\nabla\textbf{v}$ and a symmetric tensor like $\varepsilon(\textbf{u})$ equals the scalar product between the symmetrized forms of the two, we can also write the bilinear form above as follows:

      -\begin{eqnarray*}
+<p> Using this, we have now reduced the requirements on our variables to first derivatives for <picture><source srcset=$\mathbf u,\mathbf v$ and no derivatives at all for $p,q$.

      +

      Because the scalar product between a general tensor like $\nabla\textbf{v}$ and a symmetric tensor like $\varepsilon(\textbf{u})$ equals the scalar product between the symmetrized forms of the two, we can also write the bilinear form above as follows:

      +\begin{eqnarray*}
   (\varepsilon(\textbf{v}), 2\; \varepsilon(\textbf{u}))_{\Omega}
   -
   (\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\partial\Omega}
@@ -281,43 +281,43 @@
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3272.png"/>

      We will deal with the boundary terms in the next section, but it is already clear from the domain terms

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (\varepsilon(\textbf{v}), 2\; \varepsilon(\textbf{u}))_{\Omega}
   - (\textrm{div}\; \textbf{v}, p)_{\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
-\end{eqnarray*} +\end{eqnarray*}" src="form_3273.png"/>

      of the bilinear form that the Stokes equations yield a symmetric bilinear form, and consequently a symmetric (if indefinite) system matrix.

      Boundary conditions

      Note
      The material presented here is also discussed in video lecture 21.5. (All video lectures are also available here.) (See also video lecture 21.55, video lecture 21.6, video lecture 21.65.)

      The weak form just derived immediately presents us with different possibilities for imposing boundary conditions:

      1. -

        Dirichlet velocity boundary conditions: On a part $\Gamma_D\subset\partial\Omega$ we may impose Dirichlet conditions on the velocity $\textbf u$:

        +

        Dirichlet velocity boundary conditions: On a part $\Gamma_D\subset\partial\Omega$ we may impose Dirichlet conditions on the velocity $\textbf u$:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
         \textbf u = \textbf g_D \qquad\qquad \textrm{on}\ \Gamma_D.
-    \end{eqnarray*} + \end{eqnarray*}" src="form_3275.png"/>

        -

        Because test functions $\textbf{v}$ come from the tangent space of the solution variable, we have that $\textbf{v}=0$ on $\Gamma_D$ and consequently that

        -\begin{eqnarray*}
+<p> Because test functions <picture><source srcset=$\textbf{v}$ come from the tangent space of the solution variable, we have that $\textbf{v}=0$ on $\Gamma_D$ and consequently that

        +\begin{eqnarray*}
       -(\textbf{n} \otimes \mathrm
         v, 2\; \varepsilon(\textbf{u}))_{\Gamma_D}
       +
       (\textbf{n}\cdot\textbf{v}, p)_{\Gamma_D}
       = 0.
-    \end{eqnarray*} + \end{eqnarray*}" src="form_3279.png"/>

        In other words, as usual, strongly imposed boundary values do not appear in the weak form.

        It is noteworthy that if we impose Dirichlet boundary values on the entire boundary, then the pressure is only determined up to a constant. An algorithmic realization of that would use similar tools as have been seen in step-11.

      2. -

        Neumann-type or natural boundary conditions: On the rest of the boundary $\Gamma_N=\partial\Omega\backslash\Gamma_D$, let us re-write the boundary terms as follows:

        -\begin{eqnarray*}
+<p class=Neumann-type or natural boundary conditions: On the rest of the boundary $\Gamma_N=\partial\Omega\backslash\Gamma_D$, let us re-write the boundary terms as follows:

        +\begin{eqnarray*}
       -(\textbf{n} \otimes \mathrm
         v, 2\; \varepsilon(\textbf{u}))_{\Gamma_N}
       +
@@ -347,17 +347,17 @@
       &=&
       (\textbf{v},
        \textbf{n}\cdot [p \textbf{I} - 2\; \varepsilon(\textbf{u})])_{\Gamma_N}.
/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_23.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1350))
--- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_23.html	2023-11-25 15:26:01.546523057 +0100
+++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_23.html	2023-11-25 15:26:01.546523057 +0100
@@ -130,8 +130,8 @@
  <a class=

        Introduction

        Note
        The material presented here is also discussed in video lecture 28. (All video lectures are also available here.)

        This is the first of a number of tutorial programs that will finally cover "real" time-dependent problems, not the slightly odd form of time dependence found in step-18 or the DAE model of step-21. In particular, this program introduces the wave equation in a bounded domain. Later, step-24 will consider an example of absorbing boundary conditions, and step-25 a kind of nonlinear wave equation producing solutions called solitons.

        -

        The wave equation in its prototypical form reads as follows: find $u(x,t), x\in\Omega, t\in[0,T]$ that satisfies

        -\begin{eqnarray*}
+<p>The wave equation in its prototypical form reads as follows: find <picture><source srcset=$u(x,t), x\in\Omega, t\in[0,T]$ that satisfies

        +\begin{eqnarray*}
         \frac{\partial^2 u}{\partial t^2}
         -
         \Delta u &=& f
@@ -149,10 +149,10 @@
         \frac{\partial u(x,0)}{\partial t} &=& u_1(x)
         \qquad
         \textrm{in}\ \Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3358.png"/>

        Note that since this is an equation with second-order time derivatives, we need to pose two initial conditions, one for the value and one for the time derivative of the solution.

        -

        Physically, the equation describes the motion of an elastic medium. In 2-d, one can think of how a membrane moves if subjected to a force. The Dirichlet boundary conditions above indicate that the membrane is clamped at the boundary at a height $g(x,t)$ (this height might be moving as well — think of people holding a blanket and shaking it up and down). The first initial condition equals the initial deflection of the membrane, whereas the second one gives its velocity. For example, one could think of pushing the membrane down with a finger and then letting it go at $t=0$ (nonzero deflection but zero initial velocity), or hitting it with a hammer at $t=0$ (zero deflection but nonzero velocity). Both cases would induce motion in the membrane.

        +

        Physically, the equation describes the motion of an elastic medium. In 2-d, one can think of how a membrane moves if subjected to a force. The Dirichlet boundary conditions above indicate that the membrane is clamped at the boundary at a height $g(x,t)$ (this height might be moving as well — think of people holding a blanket and shaking it up and down). The first initial condition equals the initial deflection of the membrane, whereas the second one gives its velocity. For example, one could think of pushing the membrane down with a finger and then letting it go at $t=0$ (nonzero deflection but zero initial velocity), or hitting it with a hammer at $t=0$ (zero deflection but nonzero velocity). Both cases would induce motion in the membrane.

        Time discretization

        Method of lines or Rothe's method?

        There is a long-standing debate in the numerical analysis community over whether a discretization of time dependent equations should involve first discretizing the time variable leading to a stationary PDE at each time step that is then solved using standard finite element techniques (this is called the Rothe method), or whether one should first discretize the spatial variables, leading to a large system of ordinary differential equations that can then be handled by one of the usual ODE solvers (this is called the method of lines).

        @@ -165,12 +165,12 @@

        Rothe's method!

        Given these considerations, here is how we will proceed: let us first define a simple time stepping method for this second order problem, and then in a second step do the spatial discretization, i.e. we will follow Rothe's approach.

        For the first step, let us take a little detour first: in order to discretize a second time derivative, we can either discretize it directly, or we can introduce an additional variable and transform the system into a first order system. In many cases, this turns out to be equivalent, but dealing with first order systems is often simpler. To this end, let us introduce

        -\[
+<picture><source srcset=\[
         v = \frac{\partial u}{\partial t},
-\] +\]" src="form_3360.png"/>

        and call this variable the velocity for obvious reasons. We can then reformulate the original wave equation as follows:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
         \frac{\partial u}{\partial t}
         -
         v
@@ -195,37 +195,37 @@
         v(x,0) &=& u_1(x)
         \qquad
         \textrm{in}\ \Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3361.png"/>

        -

        The advantage of this formulation is that it now only contains first time derivatives for both variables, for which it is simple to write down time stepping schemes. Note that we do not have boundary conditions for $v$ at first. However, we could enforce $v=\frac{\partial
-g}{\partial t}$ on the boundary. It turns out in numerical examples that this is actually necessary: without doing so the solution doesn't look particularly wrong, but the Crank-Nicolson scheme does not conserve energy if one doesn't enforce these boundary conditions.

        -

        With this formulation, let us introduce the following time discretization where a superscript $n$ indicates the number of a time step and $k=t_n-t_{n-1}$ is the length of the present time step:

        -\begin{eqnarray*}
+<p> The advantage of this formulation is that it now only contains first time derivatives for both variables, for which it is simple to write down time stepping schemes. Note that we do not have boundary conditions for <picture><source srcset=$v$ at first. However, we could enforce $v=\frac{\partial
+g}{\partial t}$ on the boundary. It turns out in numerical examples that this is actually necessary: without doing so the solution doesn't look particularly wrong, but the Crank-Nicolson scheme does not conserve energy if one doesn't enforce these boundary conditions.

        +

        With this formulation, let us introduce the following time discretization where a superscript $n$ indicates the number of a time step and $k=t_n-t_{n-1}$ is the length of the present time step:

        +\begin{eqnarray*}
   \frac{u^n - u^{n-1}}{k}
   - \left[\theta v^n + (1-\theta) v^{n-1}\right] &=& 0,
   \\
   \frac{v^n - v^{n-1}}{k}
   - \Delta\left[\theta u^n + (1-\theta) u^{n-1}\right]
   &=& \theta f^n + (1-\theta) f^{n-1}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3364.png"/>

        -

        Note how we introduced a parameter $\theta$ here. If we chose $\theta=0$, for example, the first equation would reduce to $\frac{u^n - u^{n-1}}{k}  - v^{n-1} = 0$, which is well-known as the forward or explicit Euler method. On the other hand, if we set $\theta=1$, then we would get $\frac{u^n - u^{n-1}}{k}  - v^n = 0$, which corresponds to the backward or implicit Euler method. Both these methods are first order accurate methods. They are simple to implement, but they are not really very accurate.

        -

        The third case would be to choose $\theta=\frac 12$. The first of the equations above would then read $\frac{u^n - u^{n-1}}{k}
-- \frac 12 \left[v^n + v^{n-1}\right] = 0$. This method is known as the Crank-Nicolson method and has the advantage that it is second order accurate. In addition, it has the nice property that it preserves the energy in the solution (physically, the energy is the sum of the kinetic energy of the particles in the membrane plus the potential energy present due to the fact that it is locally stretched; this quantity is a conserved one in the continuous equation, but most time stepping schemes do not conserve it after time discretization). Since $v^n$ also appears in the equation for $u^n$, the Crank-Nicolson scheme is also implicit.

        -

        In the program, we will leave $\theta$ as a parameter, so that it will be easy to play with it. The results section will show some numerical evidence comparing the different schemes.

        -

        The equations above (called the semidiscretized equations because we have only discretized the time, but not space), can be simplified a bit by eliminating $v^n$ from the first equation and rearranging terms. We then get

        -\begin{eqnarray*}
+<p> Note how we introduced a parameter <picture><source srcset=$\theta$ here. If we chose $\theta=0$, for example, the first equation would reduce to $\frac{u^n - u^{n-1}}{k}  - v^{n-1} = 0$, which is well-known as the forward or explicit Euler method. On the other hand, if we set $\theta=1$, then we would get $\frac{u^n - u^{n-1}}{k}  - v^n = 0$, which corresponds to the backward or implicit Euler method. Both these methods are first order accurate methods. They are simple to implement, but they are not really very accurate.

        +

        The third case would be to choose $\theta=\frac 12$. The first of the equations above would then read $\frac{u^n - u^{n-1}}{k}
+- \frac 12 \left[v^n + v^{n-1}\right] = 0$. This method is known as the Crank-Nicolson method and has the advantage that it is second order accurate. In addition, it has the nice property that it preserves the energy in the solution (physically, the energy is the sum of the kinetic energy of the particles in the membrane plus the potential energy present due to the fact that it is locally stretched; this quantity is a conserved one in the continuous equation, but most time stepping schemes do not conserve it after time discretization). Since $v^n$ also appears in the equation for $u^n$, the Crank-Nicolson scheme is also implicit.

        +

        In the program, we will leave $\theta$ as a parameter, so that it will be easy to play with it. The results section will show some numerical evidence comparing the different schemes.

        +

        The equations above (called the semidiscretized equations because we have only discretized the time, but not space), can be simplified a bit by eliminating $v^n$ from the first equation and rearranging terms. We then get

        +\begin{eqnarray*}
   \left[ 1-k^2\theta^2\Delta \right] u^n &=&
          \left[ 1+k^2\theta(1-\theta)\Delta\right] u^{n-1} + k v^{n-1}
          + k^2\theta\left[\theta f^n + (1-\theta) f^{n-1}\right],\\
    v^n &=& v^{n-1} + k\Delta\left[ \theta u^n + (1-\theta) u^{n-1}\right]
    + k\left[\theta f^n + (1-\theta) f^{n-1}\right].
-\end{eqnarray*} +\end{eqnarray*}" src="form_3372.png"/>

        -

        In this form, we see that if we are given the solution $u^{n-1},v^{n-1}$ of the previous timestep, that we can then solve for the variables $u^n,v^n$ separately, i.e. one at a time. This is convenient. In addition, we recognize that the operator in the first equation is positive definite, and the second equation looks particularly simple.

        +

        In this form, we see that if we are given the solution $u^{n-1},v^{n-1}$ of the previous timestep, that we can then solve for the variables $u^n,v^n$ separately, i.e. one at a time. This is convenient. In addition, we recognize that the operator in the first equation is positive definite, and the second equation looks particularly simple.

        Space discretization

        -

        We have now derived equations that relate the approximate (semi-discrete) solution $u^n(x)$ and its time derivative $v^n(x)$ at time $t_n$ with the solutions $u^{n-1}(x),v^{n-1}(x)$ of the previous time step at $t_{n-1}$. The next step is to also discretize the spatial variable using the usual finite element methodology. To this end, we multiply each equation with a test function, integrate over the entire domain, and integrate by parts where necessary. This leads to

        -\begin{eqnarray*}
+<p>We have now derived equations that relate the approximate (semi-discrete) solution <picture><source srcset=$u^n(x)$ and its time derivative $v^n(x)$ at time $t_n$ with the solutions $u^{n-1}(x),v^{n-1}(x)$ of the previous time step at $t_{n-1}$. The next step is to also discretize the spatial variable using the usual finite element methodology. To this end, we multiply each equation with a test function, integrate over the entire domain, and integrate by parts where necessary. This leads to

        +\begin{eqnarray*}
   (u^n,\varphi) + k^2\theta^2(\nabla u^n,\nabla \varphi) &=&
   (u^{n-1},\varphi) - k^2\theta(1-\theta)(\nabla u^{n-1},\nabla \varphi)
   +
@@ -245,15 +245,15 @@
   \left[
   \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
   \right].
-\end{eqnarray*} +\end{eqnarray*}" src="form_3378.png"/>

        -

        It is then customary to approximate $u^n(x) \approx u^n_h(x) = \sum_i
-U_i^n\phi_i^n(x)$, where $\phi_i^n(x)$ are the shape functions used for the discretization of the $n$-th time step and $U_i^n$ are the unknown nodal values of the solution. Similarly, $v^n(x) \approx
-v^n_h(x) = \sum_i V_i^n\phi_i^n(x)$. Finally, we have the solutions of the previous time step, $u^{n-1}(x) \approx u^{n-1}_h(x) = \sum_i
-U_i^{n-1}\phi_i^{n-1}(x)$ and $v^{n-1}(x) \approx v^{n-1}_h(x) = \sum_i
-V_i^{n-1}\phi_i^{n-1}(x)$. Note that since the solution of the previous time step has already been computed by the time we get to time step $n$, $U^{n-1},V^{n-1}$ are known. Furthermore, note that the solutions of the previous step may have been computed on a different mesh, so we have to use shape functions $\phi^{n-1}_i(x)$.

        +

        It is then customary to approximate $u^n(x) \approx u^n_h(x) = \sum_i
+U_i^n\phi_i^n(x)$, where $\phi_i^n(x)$ are the shape functions used for the discretization of the $n$-th time step and $U_i^n$ are the unknown nodal values of the solution. Similarly, $v^n(x) \approx
+v^n_h(x) = \sum_i V_i^n\phi_i^n(x)$. Finally, we have the solutions of the previous time step, $u^{n-1}(x) \approx u^{n-1}_h(x) = \sum_i
+U_i^{n-1}\phi_i^{n-1}(x)$ and $v^{n-1}(x) \approx v^{n-1}_h(x) = \sum_i
+V_i^{n-1}\phi_i^{n-1}(x)$. Note that since the solution of the previous time step has already been computed by the time we get to time step $n$, $U^{n-1},V^{n-1}$ are known. Furthermore, note that the solutions of the previous step may have been computed on a different mesh, so we have to use shape functions $\phi^{n-1}_i(x)$.

        If we plug these expansions into above equations and test with the test functions from the present mesh, we get the following linear system:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (M^n + k^2\theta^2 A^n)U^n &=&
   M^{n,n-1}U^{n-1} - k^2\theta(1-\theta) A^{n,n-1}U^{n-1}
   +
@@ -273,10 +273,10 @@
   \left[
   \theta F^n + (1-\theta) F^{n-1}
   \right],
-\end{eqnarray*} +\end{eqnarray*}" src="form_3387.png"/>

        where

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
         M^n_{ij} &=& (\phi_i^n, \phi_j^n),
         \\
         A^n_{ij} &=& (\nabla\phi_i^n, \nabla\phi_j^n),
@@ -288,14 +288,14 @@
         F^n_{i} &=& (f^n,\phi_i^n),
         \\
         F^{n-1}_{i} &=& (f^{n-1},\phi_i^n).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3388.png"/>

        If we solve these two equations, we can move the solution one step forward and go on to the next time step.

        -

        It is worth noting that if we choose the same mesh on each time step (as we will in fact do in the program below), then we have the same shape functions on time step $n$ and $n-1$, i.e. $\phi^n_i=\phi_i^{n-1}=\phi_i$. Consequently, we get $M^n=M^{n,n-1}=M$ and $A^n=A^{n,n-1}=A$. On the other hand, if we had used different shape functions, then we would have to compute integrals that contain shape functions defined on two meshes. This is a somewhat messy process that we omit here, but that is treated in some detail in step-28.

        +

        It is worth noting that if we choose the same mesh on each time step (as we will in fact do in the program below), then we have the same shape functions on time step $n$ and $n-1$, i.e. $\phi^n_i=\phi_i^{n-1}=\phi_i$. Consequently, we get $M^n=M^{n,n-1}=M$ and $A^n=A^{n,n-1}=A$. On the other hand, if we had used different shape functions, then we would have to compute integrals that contain shape functions defined on two meshes. This is a somewhat messy process that we omit here, but that is treated in some detail in step-28.

        Under these conditions (i.e. a mesh that doesn't change), one can optimize the solution procedure a bit by basically eliminating the solution of the second linear system. We will discuss this in the introduction of the step-25 program.

        Energy conservation

        -

        One way to compare the quality of a time stepping scheme is to see whether the numerical approximation preserves conservation properties of the continuous equation. For the wave equation, the natural quantity to look at is the energy. By multiplying the wave equation by $u_t$, integrating over $\Omega$, and integrating by parts where necessary, we find that

        -\[
+<p>One way to compare the quality of a time stepping scheme is to see whether the numerical approximation preserves conservation properties of the continuous equation. For the wave equation, the natural quantity to look at is the energy. By multiplying the wave equation by <picture><source srcset=$u_t$, integrating over $\Omega$, and integrating by parts where necessary, we find that

        +\[
         \frac{d}{d t}
         \left[\frac 12 \int_\Omega \left(\frac{\partial u}{\partial
         t}\right)^2 + (\nabla u)^2 \; dx\right]
@@ -304,34 +304,34 @@
         +
         \int_{\partial\Omega} n\cdot\nabla u
         \frac{\partial g}{\partial t} \; dx.
-\] +\]" src="form_3394.png"/>

        By consequence, in absence of body forces and constant boundary values, we get that

        -\[
+<picture><source srcset=\[
         E(t) = \frac 12 \int_\Omega \left(\frac{\partial u}{\partial
         t}\right)^2 + (\nabla u)^2 \; dx
-\] +\]" src="form_3395.png"/>

        -

        is a conserved quantity, i.e. one that doesn't change with time. We will compute this quantity after each time step. It is straightforward to see that if we replace $u$ by its finite element approximation, and $\frac{\partial u}{\partial t}$ by the finite element approximation of the velocity $v$, then

        -\[
+<p> is a conserved quantity, i.e. one that doesn't change with time. We will compute this quantity after each time step. It is straightforward to see that if we replace <picture><source srcset=$u$ by its finite element approximation, and $\frac{\partial u}{\partial t}$ by the finite element approximation of the velocity $v$, then

        +\[
         E(t_n) = \frac 12 \left<V^n, M^n V^n\right>
         +
         \frac 12 \left<U^n, A^n U^n\right>.
-\] +\]" src="form_3397.png"/>

        As we will see in the results section, the Crank-Nicolson scheme does indeed conserve the energy, whereas neither the forward nor the backward Euler scheme do.

        Who are Courant, Friedrichs, and Lewy?

        One of the reasons why the wave equation is not easy to solve numerically is that explicit time discretizations are only stable if the time step is small enough. In particular, it is coupled to the spatial mesh width $h$. For the lowest order discretization we use here, the relationship reads

        -\[
+<picture><source srcset=\[
         k\le \frac hc
-\] +\]" src="form_3398.png"/>

        -

        where $c$ is the wave speed, which in our formulation of the wave equation has been normalized to one. Consequently, unless we use the implicit schemes with $\theta>0$, our solutions will not be numerically stable if we violate this restriction. Implicit schemes do not have this restriction for stability, but they become inaccurate if the time step is too large.

        +

        where $c$ is the wave speed, which in our formulation of the wave equation has been normalized to one. Consequently, unless we use the implicit schemes with $\theta>0$, our solutions will not be numerically stable if we violate this restriction. Implicit schemes do not have this restriction for stability, but they become inaccurate if the time step is too large.

        This condition was first recognized by Courant, Friedrichs, and Lewy — in 1928, long before computers became available for numerical computations! (This result appeared in the German language article R. Courant, K. Friedrichs and H. Lewy: Über die partiellen Differenzengleichungen der mathematischen Physik, Mathematische Annalen, vol. 100, no. 1, pages 32-74, 1928.) This condition on the time step is most frequently just referred to as the CFL condition. Intuitively, the CFL condition says that the time step must not be larger than the time it takes a wave to cross a single cell.

        -

        In the program, we will refine the square $[-1,1]^2$ seven times uniformly, giving a mesh size of $h=\frac 1{64}$, which is what we set the time step to. The fact that we set the time step and mesh size individually in two different places is error prone: it is too easy to refine the mesh once more but forget to also adjust the time step. step-24 shows a better way how to keep these things in sync.

        +

        In the program, we will refine the square $[-1,1]^2$ seven times uniformly, giving a mesh size of $h=\frac 1{64}$, which is what we set the time step to. The fact that we set the time step and mesh size individually in two different places is error prone: it is too easy to refine the mesh once more but forget to also adjust the time step. step-24 shows a better way how to keep these things in sync.

        The test case

        -

        Although the program has all the hooks to deal with nonzero initial and boundary conditions and body forces, we take a simple case where the domain is a square $[-1,1]^2$ and

        -\begin{eqnarray*}
+<p>Although the program has all the hooks to deal with nonzero initial and boundary conditions and body forces, we take a simple case where the domain is a square <picture><source srcset=$[-1,1]^2$ and

        +\begin{eqnarray*}
         f &=& 0,
         \\
         u_0 &=& 0,
@@ -345,7 +345,7 @@
         &&\text{otherwise}
/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_24.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1535))
--- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_24.html	2023-11-25 15:26:01.569855915 +0100
+++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_24.html	2023-11-25 15:26:01.569855915 +0100
@@ -129,101 +129,101 @@
 <p><a class=

        The problem

        The temperature at a given location, neglecting thermal diffusion, can be stated as

        -\[
+<picture><source srcset=\[
 \rho C_p \frac{\partial}{\partial t}T(t,\mathbf r) = H(t,\mathbf r)
-\] +\]" src="form_3429.png"/>

        -

        Here $\rho (\mathbf r) $ is the density; $C_p (\mathbf r) $ is the specific heat; $\frac{\partial T}{\partial t}(t,\mathbf r)$ is the temperature rise due to the delivered microwave energy; and $H(t,\mathbf r)$ is the heating function defined as the thermal energy per time and volume transformed from deposited microwave energy.

        -

        Let us assume that tissues have heterogeneous dielectric properties but homogeneous acoustic properties. The basic acoustic generation equation in an acoustically homogeneous medium can be described as follows: if $u$ is the vector-valued displacement, then tissue certainly reacts to changes in pressure by acceleration:

        -\[
+<p>Here <picture><source srcset=$\rho (\mathbf r) $ is the density; $C_p (\mathbf r) $ is the specific heat; $\frac{\partial T}{\partial t}(t,\mathbf r)$ is the temperature rise due to the delivered microwave energy; and $H(t,\mathbf r)$ is the heating function defined as the thermal energy per time and volume transformed from deposited microwave energy.

        +

        Let us assume that tissues have heterogeneous dielectric properties but homogeneous acoustic properties. The basic acoustic generation equation in an acoustically homogeneous medium can be described as follows: if $u$ is the vector-valued displacement, then tissue certainly reacts to changes in pressure by acceleration:

        +\[
 \rho \frac{\partial^2}{\partial t^2}u(t,\mathbf r) =
 -\nabla p(t,\mathbf r).
-\] +\]" src="form_3434.png"/>

        Furthermore, it contracts due to excess pressure and expands based on changes in temperature:

        -\[
+<picture><source srcset=\[
 \nabla \cdot u(t,\mathbf r) = -\frac{p(t,\mathbf r)}{\rho c_0^2}+\beta T(t,\mathbf r) .
-\] +\]" src="form_3435.png"/>

        Here, $\beta$ is a thermoexpansion coefficient.

        -

        Let us now make the assumption that heating only happens on a time scale much shorter than wave propagation through tissue (i.e. the temporal length of the microwave pulse that heats the tissue is much shorter than the time it takes a wave to cross the domain). In that case, the heating rate $H(t,\mathbf r)$ can be written as $H(t,\mathbf r) = a(\mathbf
-r)\delta(t)$ (where $a(\mathbf r)$ is a map of absorption strengths for microwave energy and $\delta(t)$ is the Dirac delta function), which together with the first equation above will yield an instantaneous jump in the temperature $T(\mathbf r)$ at time $t=0$. Using this assumption, and taking all equations together, we can rewrite and combine the above as follows:

        -\[
+<p>Let us now make the assumption that heating only happens on a time scale much shorter than wave propagation through tissue (i.e. the temporal length of the microwave pulse that heats the tissue is much shorter than the time it takes a wave to cross the domain). In that case, the heating rate <picture><source srcset=$H(t,\mathbf r)$ can be written as $H(t,\mathbf r) = a(\mathbf
+r)\delta(t)$ (where $a(\mathbf r)$ is a map of absorption strengths for microwave energy and $\delta(t)$ is the Dirac delta function), which together with the first equation above will yield an instantaneous jump in the temperature $T(\mathbf r)$ at time $t=0$. Using this assumption, and taking all equations together, we can rewrite and combine the above as follows:

        +\[
 \Delta p-\frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2} = \lambda
 a(\mathbf r)\frac{d\delta(t)}{dt}
-\] +\]" src="form_3440.png"/>

        -

        where $\lambda = - \frac{\beta}{C_p}$.

        +

        where $\lambda = - \frac{\beta}{C_p}$.

        This somewhat strange equation with the derivative of a Dirac delta function on the right hand side can be rewritten as an initial value problem as follows:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \Delta \bar{p}- \frac{1}{c_0^2} \frac{\partial^2 \bar{p}}{\partial t^2} & = &
 0 \\
 \bar{p}(0,\mathbf r) &=& c_0^2 \lambda a(\mathbf r) = b(\mathbf r)  \\
 \frac{\partial\bar{p}(0,\mathbf r)}{\partial t} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3442.png"/>

        (A derivation of this transformation into an initial value problem is given at the end of this introduction as an appendix.)

        -

        In the inverse problem, it is the initial condition $b(\mathbf r) = c_0^2 \lambda a(\mathbf r)$ that one would like to recover, since it is a map of absorption strengths for microwave energy, and therefore presumably an indicator to discern healthy from diseased tissue.

        +

        In the inverse problem, it is the initial condition $b(\mathbf r) = c_0^2 \lambda a(\mathbf r)$ that one would like to recover, since it is a map of absorption strengths for microwave energy, and therefore presumably an indicator to discern healthy from diseased tissue.

        In real application, the thermoacoustic source is very small as compared to the medium. The propagation path of the thermoacoustic waves can then be approximated as from the source to the infinity. Furthermore, detectors are only a limited distance from the source. One only needs to evaluate the values when the thermoacoustic waves pass through the detectors, although they do continue beyond. This is therefore a problem where we are only interested in a small part of an infinite medium, and we do not want waves generated somewhere to be reflected at the boundary of the domain which we consider interesting. Rather, we would like to simulate only that part of the wave field that is contained inside the domain of interest, and waves that hit the boundary of that domain to simply pass undisturbed through the boundary. In other words, we would like the boundary to absorb any waves that hit it.

        In general, this is a hard problem: Good absorbing boundary conditions are nonlinear and/or numerically very expensive. We therefore opt for a simple first order approximation to absorbing boundary conditions that reads

        -\[
+<picture><source srcset=\[
 \frac{\partial\bar{p}}{\partial\mathbf n} =
 -\frac{1}{c_0} \frac{\partial\bar{p}}{\partial t}
-\] +\]" src="form_3444.png"/>

        -

        Here, $\frac{\partial\bar{p}}{\partial\mathbf n}$ is the normal derivative at the boundary. It should be noted that this is not a particularly good boundary condition, but it is one of the very few that are reasonably simple to implement.

        +

        Here, $\frac{\partial\bar{p}}{\partial\mathbf n}$ is the normal derivative at the boundary. It should be noted that this is not a particularly good boundary condition, but it is one of the very few that are reasonably simple to implement.

        Weak form and discretization

        As in step-23, one first introduces a second variable, which is defined as the derivative of the pressure potential:

        -\[
+<picture><source srcset=\[
 v = \frac{\partial\bar{p}}{\partial t}
-\] +\]" src="form_3446.png"/>

        With the second variable, one then transforms the forward problem into two separate equations:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \bar{p}_{t} - v & = & 0 \\
 \Delta\bar{p} - \frac{1}{c_0^2}\,v_{t} & = & f
-\end{eqnarray*} +\end{eqnarray*}" src="form_3447.png"/>

        with initial conditions:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \bar{p}(0,\mathbf r) & = & b(r) \\
 v(0,\mathbf r)=\bar{p}_t(0,\mathbf r) & = & 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3448.png"/>

        -

        Note that we have introduced a right hand side $f(t,\mathbf r)$ here to show how to derive these formulas in the general case, although in the application to the thermoacoustic problem $f=0$.

        -

        The semi-discretized, weak version of this model, using the general $\theta$ scheme introduced in step-23 is then:

        -\begin{eqnarray*}
+<p> Note that we have introduced a right hand side <picture><source srcset=$f(t,\mathbf r)$ here to show how to derive these formulas in the general case, although in the application to the thermoacoustic problem $f=0$.

        +

        The semi-discretized, weak version of this model, using the general $\theta$ scheme introduced in step-23 is then:

        +\begin{eqnarray*}
 \left(\frac{\bar{p}^n-\bar{p}^{n-1}}{k},\phi\right)_\Omega-
 \left(\theta v^{n}+(1-\theta)v^{n-1},\phi\right)_\Omega & = & 0   \\
 -\left(\nabla((\theta\bar{p}^n+(1-\theta)\bar{p}^{n-1})),\nabla\phi\right)_\Omega-
 \frac{1}{c_0}\left(\frac{\bar{p}^n-\bar{p}^{n-1}}{k},\phi\right)_{\partial\Omega} -
 \frac{1}{c_0^2}\left(\frac{v^n-v^{n-1}}{k},\phi\right)_\Omega & =
 & \left(\theta f^{n}+(1-\theta)f^{n-1}, \phi\right)_\Omega,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3450.png"/>

        -

        where $\phi$ is an arbitrary test function, and where we have used the absorbing boundary condition to integrate by parts: absorbing boundary conditions are incorporated into the weak form by using

        -\[
+<p> where <picture><source srcset=$\phi$ is an arbitrary test function, and where we have used the absorbing boundary condition to integrate by parts: absorbing boundary conditions are incorporated into the weak form by using

        +\[
 \int_\Omega\varphi \, \Delta p\; dx =
 -\int_\Omega\nabla \varphi \cdot \nabla p dx +
 \int_{\partial\Omega}\varphi \frac{\partial p}{\partial {\mathbf n}}ds.
-\] +\]" src="form_3451.png"/>

        From this we obtain the discrete model by introducing a finite number of shape functions, and get

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 M\bar{p}^{n}-k \theta M v^n & = & M\bar{p}^{n-1}+k (1-\theta)Mv^{n-1},\\
 
 (-c_0^2k \theta A-c_0 B)\bar{p}^n-Mv^{n} & = &
 (c_0^2k(1-\theta)A-c_0B)\bar{p}^{n-1}-Mv^{n-1}+c_0^2k(\theta F^{n}+(1-\theta)F^{n-1}).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3452.png"/>

        -

        The matrices $M$ and $A$ are here as in step-23, and the boundary mass matrix

        -\[
+<p> The matrices <picture><source srcset=$M$ and $A$ are here as in step-23, and the boundary mass matrix

        +\[
         B_{ij} = \left(\varphi_i,\varphi_j\right)_{\partial\Omega}
-\] +\]" src="form_3453.png"/>

        results from the use of absorbing boundary conditions.

        Above two equations can be rewritten in a matrix form with the pressure and its derivative as an unknown vector:

        -\[
+<picture><source srcset=\[
 \left(\begin{array}{cc}
  M         &       -k\theta M \\
 c_0^2\,k\,\theta\,A+c_0\,B  &  M   \\
@@ -236,10 +236,10 @@
  G_1  \\
  G_2 -(\theta F^{n}+(1-\theta)F ^{n-1})c_{0}^{2}k \\
                 \end{array}\right)
-\] +\]" src="form_3454.png"/>

        where

        -\[
+<picture><source srcset=\[
 \left(\begin{array}{c}
 G_1 \\
 G_2 \\
@@ -248,115 +248,115 @@
  M\bar{p}^{n-1}+k(1-\theta)Mv^{n-1}\\
  (-c_{0}^{2}k (1-\theta)A+c_0 B)\bar{p}^{n-1} +Mv^{n-1}
                 \end{array}\right)
-\] +\]" src="form_3455.png"/>

        By simple transformations, one then obtains two equations for the pressure potential and its derivative, just as in the previous tutorial program:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 (M+(k\,\theta\,c_{0})^{2}A+c_0k\theta B)\bar{p}^{n} & = &
 G_{1}+(k\, \theta)G_{2}-(c_0k)^2\theta (\theta F^{n}+(1-\theta)F^{n-1}) \\
 Mv^n & = & -(c_0^2\,k\, \theta\, A+c_0B)\bar{p}^{n}+ G_2 -
 c_0^2k(\theta F^{n}+(1-\theta)F^{n-1})
-\end{eqnarray*} +\end{eqnarray*}" src="form_3456.png"/>

        What the program does

        Compared to step-23, this programs adds the treatment of a simple absorbing boundary conditions. In addition, it deals with data obtained from actual experimental measurements. To this end, we need to evaluate the solution at points at which the experiment also evaluates a real pressure field. We will see how to do that using the VectorTools::point_value function further down below.

        Appendix: PDEs with Dirac delta functions as right hand side and their transformation to an initial value problem

        In the derivation of the initial value problem for the wave equation, we initially found that the equation had the derivative of a Dirac delta function as a right hand side:

        -\[
+<picture><source srcset=\[
 \Delta p-\frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2} = \lambda
 a(\mathbf r)\frac{d\delta(t)}{dt}.
-\] +\]" src="form_3457.png"/>

        -

        In order to see how to transform this single equation into the usual statement of a PDE with initial conditions, let us make the assumption that the physically quite reasonable medium is at rest initially, i.e. $p(t,\mathbf
-r)=\frac{\partial p(t,\mathbf r)}{\partial t}=0$ for $t<0$. Next, let us form the indefinite integral with respect to time of both sides:

        -\[
+<p> In order to see how to transform this single equation into the usual statement of a PDE with initial conditions, let us make the assumption that the physically quite reasonable medium is at rest initially, i.e. <picture><source srcset=$p(t,\mathbf
+r)=\frac{\partial p(t,\mathbf r)}{\partial t}=0$ for $t<0$. Next, let us form the indefinite integral with respect to time of both sides:

        +\[
 \int^t \Delta p\; dt -\int^t \frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2}
 \; dt
 =
 \int^t \lambda a(\mathbf r)\frac{d\delta(t)}{dt} \;dt.
-\] +\]" src="form_3460.png"/>

        This immediately leads to the statement

        -\[
+<picture><source srcset=\[
 P(t,\mathbf r) - \frac{1}{c_0^2} \frac{\partial p}{\partial t}
/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_25.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (2066))
--- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_25.html	2023-11-25 15:26:01.596522039 +0100
+++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_25.html	2023-11-25 15:26:01.596522039 +0100
@@ -151,14 +151,14 @@
 \end{eqnarray*}

        Discretization of the equations in time

        -

        Now, we can discretize the split formulation in time using the $\theta$-method, which has a stencil of only two time steps. By choosing a $\theta\in [0,1]$, the latter discretization allows us to choose from a continuum of schemes. In particular, if we pick $\theta=0$ or $\theta=1$, we obtain the first-order accurate explicit or implicit Euler method, respectively. Another important choice is $\theta=\frac{1}{2}$, which gives the second-order accurate Crank-Nicolson scheme. Henceforth, a superscript $n$ denotes the values of the variables at the $n^{\mathrm{th}}$ time step, i.e. at $t=t_n \dealcoloneq n k$, where $k$ is the (fixed) time step size. Thus, the split formulation of the time-discretized sine-Gordon equation becomes

        +

        Now, we can discretize the split formulation in time using the $\theta$-method, which has a stencil of only two time steps. By choosing a $\theta\in [0,1]$, the latter discretization allows us to choose from a continuum of schemes. In particular, if we pick $\theta=0$ or $\theta=1$, we obtain the first-order accurate explicit or implicit Euler method, respectively. Another important choice is $\theta=\frac{1}{2}$, which gives the second-order accurate Crank-Nicolson scheme. Henceforth, a superscript $n$ denotes the values of the variables at the $n^{\mathrm{th}}$ time step, i.e. at $t=t_n \dealcoloneq n k$, where $k$ is the (fixed) time step size. Thus, the split formulation of the time-discretized sine-Gordon equation becomes

        \begin{eqnarray*}
   \frac{u^n - u^{n-1}}{k} - \left[\theta v^n + (1-\theta) v^{n-1}\right] &=& 0,\\
   \frac{v^n - v^{n-1}}{k} - \Delta\left[\theta u^n + (1-\theta) u^{n-1}\right]
   &=& -\sin\left[\theta u^n + (1-\theta) u^{n-1}\right].
 \end{eqnarray*}

        -

        We can simplify the latter via a bit of algebra. Eliminating $v^n$ from the first equation and rearranging, we obtain

        +

        We can simplify the latter via a bit of algebra. Eliminating $v^n$ from the first equation and rearranging, we obtain

        \begin{eqnarray*}
   \left[ 1-k^2\theta^2\Delta \right] u^n &=&
          \left[ 1+k^2\theta(1-\theta)\Delta\right] u^{n-1} + k v^{n-1}
@@ -167,8 +167,8 @@
          - k\sin\left[ \theta u^n + (1-\theta) u^{n-1} \right].
 \end{eqnarray*}

        -

        It may seem as though we can just proceed to discretize the equations in space at this point. While this is true for the second equation (which is linear in $v^n$), this would not work for all $\theta$ since the first equation above is nonlinear. Therefore, a nonlinear solver must be implemented, then the equations can be discretized in space and solved.

        -

        To this end, we can use Newton's method. Given the nonlinear equation $F(u^n) = 0$, we produce successive approximations to $u^n$ as follows:

        +

        It may seem as though we can just proceed to discretize the equations in space at this point. While this is true for the second equation (which is linear in $v^n$), this would not work for all $\theta$ since the first equation above is nonlinear. Therefore, a nonlinear solver must be implemented, then the equations can be discretized in space and solved.

        +

        To this end, we can use Newton's method. Given the nonlinear equation $F(u^n) = 0$, we produce successive approximations to $u^n$ as follows:

        \begin{eqnarray*}
   \mbox{ Find } \delta u^n_l \mbox{ s.t. } F'(u^n_l)\delta u^n_l = -F(u^n_l)
   \mbox{, set }  u^n_{l+1} = u^n_l + \delta u^n_l.
@@ -185,7 +185,7 @@
 </p>
 <p> Notice that while <picture><source srcset=$F(u^n_l)$ is a function, $F'(u^n_l)$ is an operator.

        Weak formulation of the time-discretized equations

        -

        With hindsight, we choose both the solution and the test space to be $H^1(\Omega)$. Hence, multiplying by a test function $\varphi$ and integrating, we obtain the following variational (or weak) formulation of the split formulation (including the nonlinear solver for the first equation) at each time step:

        +

        With hindsight, we choose both the solution and the test space to be $H^1(\Omega)$. Hence, multiplying by a test function $\varphi$ and integrating, we obtain the following variational (or weak) formulation of the split formulation (including the nonlinear solver for the first equation) at each time step:

        \begin{eqnarray*}
   &\mbox{ Find}& \delta u^n_l \in H^1(\Omega) \mbox{ s.t. }
   \left( F'(u^n_l)\delta u^n_l, \varphi \right)_{\Omega}
@@ -199,10 +199,10 @@
          \varphi \right)_{\Omega} \;\forall\varphi\in H^1(\Omega).
 \end{eqnarray*}

        -

        Note that the we have used integration by parts and the zero Neumann boundary conditions on all terms involving the Laplacian operator. Moreover, $F(\cdot)$ and $F'(\cdot)$ are as defined above, and $(\cdot,\cdot)_{\Omega}$ denotes the usual $L^2$ inner product over the domain $\Omega$, i.e. $(f,g)_{\Omega} = \int_\Omega fg
+<p> Note that the we have used integration by parts and the zero Neumann boundary conditions on all terms involving the Laplacian operator. Moreover, <picture><source srcset=$F(\cdot)$ and $F'(\cdot)$ are as defined above, and $(\cdot,\cdot)_{\Omega}$ denotes the usual $L^2$ inner product over the domain $\Omega$, i.e. $(f,g)_{\Omega} = \int_\Omega fg
 \,\mathrm{d}x$. Finally, notice that the first equation is, in fact, the definition of an iterative procedure, so it is solved multiple times during each time step until a stopping criterion is met.

        Discretization of the weak formulation in space

        -

        Using the Finite Element Method, we discretize the variational formulation in space. To this end, let $V_h$ be a finite-dimensional $H^1(\Omega)$-conforming finite element space ( $\mathrm{dim}\, V_h = N
+<p>Using the Finite Element Method, we discretize the variational formulation in space. To this end, let <picture><source srcset=$V_h$ be a finite-dimensional $H^1(\Omega)$-conforming finite element space ( $\mathrm{dim}\, V_h = N
 < \infty$) with nodal basis $\{\varphi_1,\ldots,\varphi_N\}$. Now, we can expand all functions in the weak formulation (see above) in terms of the nodal basis. Henceforth, we shall denote by a capital letter the vector of coefficients (in the nodal basis) of a function denoted by the same letter in lower case; e.g., $u^n = \sum_{i=1}^N
 U^n_i \varphi_i$ where $U^n \in {R}^N$ and $u^n \in
 H^1(\Omega)$. Thus, the finite-dimensional version of the variational formulation requires that we solve the following matrix equations at each time step:

        @@ -221,9 +221,9 @@ + k^2\theta^2N(u^n_l,u^{n-1}) \end{eqnarray*}" src="form_3524.png"/>

        -

        Again, note that the first matrix equation above is, in fact, the definition of an iterative procedure, so it is solved multiple times until a stopping criterion is met. Moreover, $M$ is the mass matrix, i.e. $M_{ij} = \left( \varphi_i,\varphi_j \right)_{\Omega}$, $A$ is the Laplace matrix, i.e. $A_{ij} = \left( \nabla \varphi_i, \nabla
+<p> Again, note that the first matrix equation above is, in fact, the definition of an iterative procedure, so it is solved multiple times until a stopping criterion is met. Moreover, <picture><source srcset=$M$ is the mass matrix, i.e. $M_{ij} = \left( \varphi_i,\varphi_j \right)_{\Omega}$, $A$ is the Laplace matrix, i.e. $A_{ij} = \left( \nabla \varphi_i, \nabla
 \varphi_j \right)_{\Omega}$, $S$ is the nonlinear term in the equation that defines our auxiliary velocity variable, i.e. $S_j(f,g) = \left(
-  \sin\left[ \theta f + (1-\theta) g\right], \varphi_j \right)_{\Omega}$, and $N$ is the nonlinear term in the Jacobian matrix of $F(\cdot)$, i.e. $N_{ij}(f,g) = \left( \cos\left[ \theta f + (1-\theta) g\right]\varphi_i,
+  \sin\left[ \theta f + (1-\theta) g\right], \varphi_j \right)_{\Omega}$, and $N$ is the nonlinear term in the Jacobian matrix of $F(\cdot)$, i.e. $N_{ij}(f,g) = \left( \cos\left[ \theta f + (1-\theta) g\right]\varphi_i,
   \varphi_j \right)_{\Omega}$.

        What solvers can we use for the first equation? Let's look at the matrix we have to invert:

        \[
@@ -233,12 +233,12 @@
   + k^2 \theta^2 \int_\Omega \nabla\varphi_i\nabla\varphi_j \; dx,
 \]

        -

        for some $\alpha$ that depends on the present and previous solution. First, note that the matrix is symmetric. In addition, if the time step $k$ is small enough, i.e. if $k\theta<1$, then the matrix is also going to be positive definite. In the program below, this will always be the case, so we will use the Conjugate Gradient method together with the SSOR method as preconditioner. We should keep in mind, however, that this will fail if we happen to use a bigger time step. Fortunately, in that case the solver will just throw an exception indicating a failure to converge, rather than silently producing a wrong result. If that happens, then we can simply replace the CG method by something that can handle indefinite symmetric systems. The GMRES solver is typically the standard method for all "bad" linear systems, but it is also a slow one. Possibly better would be a solver that utilizes the symmetry, such as, for example, SymmLQ, which is also implemented in deal.II.

        -

        This program uses a clever optimization over step-23 and step-24: If you read the above formulas closely, it becomes clear that the velocity $V$ only ever appears in products with the mass matrix. In step-23 and step-24, we were, therefore, a bit wasteful: in each time step, we would solve a linear system with the mass matrix, only to multiply the solution of that system by $M$ again in the next time step. This can, of course, be avoided, and we do so in this program.

        +

        for some $\alpha$ that depends on the present and previous solution. First, note that the matrix is symmetric. In addition, if the time step $k$ is small enough, i.e. if $k\theta<1$, then the matrix is also going to be positive definite. In the program below, this will always be the case, so we will use the Conjugate Gradient method together with the SSOR method as preconditioner. We should keep in mind, however, that this will fail if we happen to use a bigger time step. Fortunately, in that case the solver will just throw an exception indicating a failure to converge, rather than silently producing a wrong result. If that happens, then we can simply replace the CG method by something that can handle indefinite symmetric systems. The GMRES solver is typically the standard method for all "bad" linear systems, but it is also a slow one. Possibly better would be a solver that utilizes the symmetry, such as, for example, SymmLQ, which is also implemented in deal.II.

        +

        This program uses a clever optimization over step-23 and step-24: If you read the above formulas closely, it becomes clear that the velocity $V$ only ever appears in products with the mass matrix. In step-23 and step-24, we were, therefore, a bit wasteful: in each time step, we would solve a linear system with the mass matrix, only to multiply the solution of that system by $M$ again in the next time step. This can, of course, be avoided, and we do so in this program.

        The test case

        There are a few analytical solutions for the sine-Gordon equation, both in 1D and 2D. In particular, the program as is computes the solution to a problem with a single kink-like solitary wave initial condition. This solution is given by Leibbrandt in Phys. Rev. Lett. 41(7), and is implemented in the ExactSolution class.

        It should be noted that this closed-form solution, strictly speaking, only holds for the infinite-space initial-value problem (not the Neumann initial-boundary-value problem under consideration here). However, given that we impose zero Neumann boundary conditions, we expect that the solution to our initial-boundary-value problem would be close to the solution of the infinite-space initial-value problem, if reflections of waves off the boundaries of our domain do not occur. In practice, this is of course not the case, but we can at least assume that this were so.

        -

        The constants $\vartheta$ and $\lambda$ in the 2D solution and $\vartheta$, $\phi$ and $\tau$ in the 3D solution are called the Bäcklund transformation parameters. They control such things as the orientation and steepness of the kink. For the purposes of testing the code against the exact solution, one should choose the parameters so that the kink is aligned with the grid.

        +

        The constants $\vartheta$ and $\lambda$ in the 2D solution and $\vartheta$, $\phi$ and $\tau$ in the 3D solution are called the Bäcklund transformation parameters. They control such things as the orientation and steepness of the kink. For the purposes of testing the code against the exact solution, one should choose the parameters so that the kink is aligned with the grid.

        The solutions that we implement in the ExactSolution class are these:

        • In 1D:

          @@ -261,7 +261,7 @@ u(x,y,t) = 4 \arctan \left[a_0 e^{s\xi}\right], \]" src="form_3534.png"/>

          -

          where $\xi$ is defined as

          +

          where $\xi$ is defined as

          \[
     \xi = x \cos\vartheta + \sin(\vartheta) (y\cosh\lambda + t\sinh \lambda),
   \] @@ -275,7 +275,7 @@ u(x,y,z,t) = 4 \arctan \left[c_0 e^{s\xi}\right], \]" src="form_3537.png"/>

          - where $\xi$ is defined as

          + where $\xi$ is defined as

          \[
     \xi = x \cos\vartheta + y \sin \vartheta \cos\phi +
           \sin \vartheta \sin\phi (z\cosh\tau + t\sinh \tau),
@@ -327,7 +327,7 @@
 <p>The entire algorithm for solving the problem is encapsulated in this class. As in previous example programs, the class is declared with a template parameter, which is the spatial dimension, so that we can solve the sine-Gordon equation in one, two or three spatial dimensions. For more on the dimension-independent class-encapsulation of the problem, the reader should consult <a class=step-3 and step-4.

          Compared to step-23 and step-24, there isn't anything newsworthy in the general structure of the program (though there is of course in the inner workings of the various functions!). The most notable difference is the presence of the two new functions compute_nl_term and compute_nl_matrix that compute the nonlinear contributions to the system matrix and right-hand side of the first equation, as discussed in the Introduction. In addition, we have to have a vector solution_update that contains the nonlinear update to the solution vector in each Newton step.

          As also mentioned in the introduction, we do not store the velocity variable in this program, but the mass matrix times the velocity. This is done in the M_x_velocity variable (the "x" is intended to stand for "times").

          -

          Finally, the output_timestep_skip variable stores the number of time steps to be taken each time before graphical output is to be generated. This is of importance when using fine meshes (and consequently small time steps) where we would run lots of time steps and create lots of output files of solutions that look almost the same in subsequent files. This only clogs up our visualization procedures and we should avoid creating more output than we are really interested in. Therefore, if this variable is set to a value $n$ bigger than one, output is generated only every $n$th time step.

          +

          Finally, the output_timestep_skip variable stores the number of time steps to be taken each time before graphical output is to be generated. This is of importance when using fine meshes (and consequently small time steps) where we would run lots of time steps and create lots of output files of solutions that look almost the same in subsequent files. This only clogs up our visualization procedures and we should avoid creating more output than we are really interested in. Therefore, if this variable is set to a value $n$ bigger than one, output is generated only every $n$th time step.

            template <int dim>
            class SineGordonProblem
            {
          @@ -470,8 +470,8 @@

          Implementation of the SineGordonProblem class

          Let's move on to the implementation of the main class, as it implements the algorithm outlined in the introduction.

          SineGordonProblem::SineGordonProblem

          -

          This is the constructor of the SineGordonProblem class. It specifies the desired polynomial degree of the finite elements, associates a DoFHandler to the triangulation object (just as in the example programs step-3 and step-4), initializes the current or initial time, the final time, the time step size, and the value of $\theta$ for the time stepping scheme. Since the solutions we compute here are time-periodic, the actual value of the start-time doesn't matter, and we choose it so that we start at an interesting time.

          -

          Note that if we were to chose the explicit Euler time stepping scheme ( $\theta = 0$), then we must pick a time step $k \le h$, otherwise the scheme is not stable and oscillations might arise in the solution. The Crank-Nicolson scheme ( $\theta = \frac{1}{2}$) and the implicit Euler scheme ( $\theta=1$) do not suffer from this deficiency, since they are unconditionally stable. However, even then the time step should be chosen to be on the order of $h$ in order to obtain a good solution. Since we know that our mesh results from the uniform subdivision of a rectangle, we can compute that time step easily; if we had a different domain, the technique in step-24 using GridTools::minimal_cell_diameter would work as well.

          +

          This is the constructor of the SineGordonProblem class. It specifies the desired polynomial degree of the finite elements, associates a DoFHandler to the triangulation object (just as in the example programs step-3 and step-4), initializes the current or initial time, the final time, the time step size, and the value of $\theta$ for the time stepping scheme. Since the solutions we compute here are time-periodic, the actual value of the start-time doesn't matter, and we choose it so that we start at an interesting time.

          +

          Note that if we were to chose the explicit Euler time stepping scheme ( $\theta = 0$), then we must pick a time step $k \le h$, otherwise the scheme is not stable and oscillations might arise in the solution. The Crank-Nicolson scheme ( $\theta = \frac{1}{2}$) and the implicit Euler scheme ( $\theta=1$) do not suffer from this deficiency, since they are unconditionally stable. However, even then the time step should be chosen to be on the order of $h$ in order to obtain a good solution. Since we know that our mesh results from the uniform subdivision of a rectangle, we can compute that time step easily; if we had a different domain, the technique in step-24 using GridTools::minimal_cell_diameter would work as well.

            template <int dim>
            SineGordonProblem<dim>::SineGordonProblem()
            : fe(1)
          @@ -486,7 +486,7 @@
           
          STL namespace.

          SineGordonProblem::make_grid_and_dofs

          -

          This function creates a rectangular grid in dim dimensions and refines it several times. Also, all matrix and vector members of the SineGordonProblem class are initialized to their appropriate sizes once the degrees of freedom have been assembled. Like step-24, we use MatrixCreator functions to generate a mass matrix $M$ and a Laplace matrix $A$ and store them in the appropriate variables for the remainder of the program's life.

          +

          This function creates a rectangular grid in dim dimensions and refines it several times. Also, all matrix and vector members of the SineGordonProblem class are initialized to their appropriate sizes once the degrees of freedom have been assembled. Like step-24, we use MatrixCreator functions to generate a mass matrix $M$ and a Laplace matrix $A$ and store them in the appropriate variables for the remainder of the program's life.

            template <int dim>
            void SineGordonProblem<dim>::make_grid_and_dofs()
            {
          @@ -762,7 +762,7 @@
            << "advancing to t = " << time << '.' << std::endl;
           

          At the beginning of each time step we must solve the nonlinear equation in the split formulation via Newton's method — i.e. solve for $\delta U^{n,l}$ then compute $U^{n,l+1}$ and so on. The stopping criterion for this nonlinear iteration is that $\|F_h(U^{n,l})\|_2 \le 10^{-6} \|F_h(U^{n,0})\|_2$. Consequently, we need to record the norm of the residual in the first iteration.

          -

          At the end of each iteration, we output to the console how many linear solver iterations it took us. When the loop below is done, we have (an approximation of) $U^n$.

          +

          At the end of each iteration, we output to the console how many linear solver iterations it took us. When the loop below is done, we have (an approximation of) $U^n$.

            double initial_rhs_norm = 0.;
            bool first_iteration = true;
            do
          @@ -786,7 +786,7 @@
           
            std::cout << " CG iterations per nonlinear step." << std::endl;
           
          -

          Upon obtaining the solution to the first equation of the problem at $t=t_n$, we must update the auxiliary velocity variable $V^n$. However, we do not compute and store $V^n$ since it is not a quantity we use directly in the problem. Hence, for simplicity, we update $MV^n$ directly:

          +

      Upon obtaining the solution to the first equation of the problem at $t=t_n$, we must update the auxiliary velocity variable $V^n$. However, we do not compute and store $V^n$ since it is not a quantity we use directly in the problem. Hence, for simplicity, we update $MV^n$ directly:

        Vector<double> tmp_vector(solution.size());
        laplace_matrix.vmult(tmp_vector, solution);
        M_x_velocity.add(-time_step * theta, tmp_vector);
      @@ -845,7 +845,7 @@
        return 0;
        }

      Results

      -

      The explicit Euler time stepping scheme ( $\theta=0$) performs adequately for the problems we wish to solve. Unfortunately, a rather small time step has to be chosen due to stability issues — $k\sim h/10$ appears to work for most the simulations we performed. On the other hand, the Crank-Nicolson scheme ( $\theta=\frac{1}{2}$) is unconditionally stable, and (at least for the case of the 1D breather) we can pick the time step to be as large as $25h$ without any ill effects on the solution. The implicit Euler scheme ( $\theta=1$) is "exponentially damped," so it is not a good choice for solving the sine-Gordon equation, which is conservative. However, some of the damped schemes in the continuum that is offered by the $\theta$-method were useful for eliminating spurious oscillations due to boundary effects.

      +

      The explicit Euler time stepping scheme ( $\theta=0$) performs adequately for the problems we wish to solve. Unfortunately, a rather small time step has to be chosen due to stability issues — $k\sim h/10$ appears to work for most the simulations we performed. On the other hand, the Crank-Nicolson scheme ( $\theta=\frac{1}{2}$) is unconditionally stable, and (at least for the case of the 1D breather) we can pick the time step to be as large as $25h$ without any ill effects on the solution. The implicit Euler scheme ( $\theta=1$) is "exponentially damped," so it is not a good choice for solving the sine-Gordon equation, which is conservative. However, some of the damped schemes in the continuum that is offered by the $\theta$-method were useful for eliminating spurious oscillations due to boundary effects.

      In the simulations below, we solve the sine-Gordon equation on the interval $\Omega =
 [-10,10]$ in 1D and on the square $\Omega = [-10,10]\times [-10,10]$ in 2D. In each case, the respective grid is refined uniformly 6 times, i.e. $h\sim
 2^{-6}$.

      @@ -855,11 +855,11 @@ u_{\mathrm{breather}}(x,t) = -4\arctan \left(\frac{m}{\sqrt{1-m^2}} \frac{\sin\left(\sqrt{1-m^2}t +c_2\right)}{\cosh(mx+c_1)} \right), \]" src="form_3569.png"/>

      -

      where $c_1$, $c_2$ and $m<1$ are constants. In the simulation below, we have chosen $c_1=0$, $c_2=0$, $m=0.5$. Moreover, it is know that the period of oscillation of the breather is $2\pi\sqrt{1-m^2}$, hence we have chosen $t_0=-5.4414$ and $t_f=2.7207$ so that we can observe three oscillations of the solution. Then, taking $u_0(x) = u_{\mathrm{breather}}(x,t_0)$, $\theta=0$ and $k=h/10$, the program computed the following solution.

      +

      where $c_1$, $c_2$ and $m<1$ are constants. In the simulation below, we have chosen $c_1=0$, $c_2=0$, $m=0.5$. Moreover, it is know that the period of oscillation of the breather is $2\pi\sqrt{1-m^2}$, hence we have chosen $t_0=-5.4414$ and $t_f=2.7207$ so that we can observe three oscillations of the solution. Then, taking $u_0(x) = u_{\mathrm{breather}}(x,t_0)$, $\theta=0$ and $k=h/10$, the program computed the following solution.

      Animation of the 1D stationary breather.

      Though not shown how to do this in the program, another way to visualize the (1+1)-d solution is to use output generated by the DataOutStack class; it allows to "stack" the solutions of individual time steps, so that we get 2D space-time graphs from 1D time-dependent solutions. This produces the space-time plot below instead of the animation above.

      A space-time plot of the 1D stationary breather.

      -

      Furthermore, since the breather is an analytical solution of the sine-Gordon equation, we can use it to validate our code, although we have to assume that the error introduced by our choice of Neumann boundary conditions is small compared to the numerical error. Under this assumption, one could use the VectorTools::integrate_difference function to compute the difference between the numerical solution and the function described by the ExactSolution class of this program. For the simulation shown in the two images above, the $L^2$ norm of the error in the finite element solution at each time step remained on the order of $10^{-2}$. Hence, we can conclude that the numerical method has been implemented correctly in the program.

      +

      Furthermore, since the breather is an analytical solution of the sine-Gordon equation, we can use it to validate our code, although we have to assume that the error introduced by our choice of Neumann boundary conditions is small compared to the numerical error. Under this assumption, one could use the VectorTools::integrate_difference function to compute the difference between the numerical solution and the function described by the ExactSolution class of this program. For the simulation shown in the two images above, the $L^2$ norm of the error in the finite element solution at each time step remained on the order of $10^{-2}$. Hence, we can conclude that the numerical method has been implemented correctly in the program.

      A few (2+1)D Solutions

      The only analytical solution to the sine-Gordon equation in (2+1)D that can be found in the literature is the so-called kink solitary wave. It has the following closed-form expression:

      \[
@@ -871,11 +871,11 @@
     \xi = x \cos\vartheta + \sin(\vartheta) (y\cosh\lambda + t\sinh \lambda)
   \]

      -

      where $a_0$, $\vartheta$ and $\lambda$ are constants. In the simulation below we have chosen $a_0=\lambda=1$. Notice that if $\vartheta=\pi$ the kink is stationary, hence it would make a good solution against which we can validate the program in 2D because no reflections off the boundary of the domain occur.

      -

      The simulation shown below was performed with $u_0(x) = u_{\mathrm{kink}}(x,t_0)$, $\theta=\frac{1}{2}$, $k=20h$, $t_0=1$ and $t_f=500$. The $L^2$ norm of the error of the finite element solution at each time step remained on the order of $10^{-2}$, showing that the program is working correctly in 2D, as well as 1D. Unfortunately, the solution is not very interesting, nonetheless we have included a snapshot of it below for completeness.

      +

      where $a_0$, $\vartheta$ and $\lambda$ are constants. In the simulation below we have chosen $a_0=\lambda=1$. Notice that if $\vartheta=\pi$ the kink is stationary, hence it would make a good solution against which we can validate the program in 2D because no reflections off the boundary of the domain occur.

      +

      The simulation shown below was performed with $u_0(x) = u_{\mathrm{kink}}(x,t_0)$, $\theta=\frac{1}{2}$, $k=20h$, $t_0=1$ and $t_f=500$. The $L^2$ norm of the error of the finite element solution at each time step remained on the order of $10^{-2}$, showing that the program is working correctly in 2D, as well as 1D. Unfortunately, the solution is not very interesting, nonetheless we have included a snapshot of it below for completeness.

      Stationary 2D kink.

      Now that we have validated the code in 1D and 2D, we move to a problem where the analytical solution is unknown.

      -

      To this end, we rotate the kink solution discussed above about the $z$ axis: we let $\vartheta=\frac{\pi}{4}$. The latter results in a solitary wave that is not aligned with the grid, so reflections occur at the boundaries of the domain immediately. For the simulation shown below, we have taken $u_0(x)=u_{\mathrm{kink}}(x,t_0)$, $\theta=\frac{2}{3}$, $k=20h$, $t_0=0$ and $t_f=20$. Moreover, we had to pick $\theta=\frac{2}{3}$ because for any $\theta\le\frac{1}{2}$ oscillations arose at the boundary, which are likely due to the scheme and not the equation, thus picking a value of $\theta$ a good bit into the "exponentially damped" spectrum of the time stepping schemes assures these oscillations are not created.

      +

      To this end, we rotate the kink solution discussed above about the $z$ axis: we let $\vartheta=\frac{\pi}{4}$. The latter results in a solitary wave that is not aligned with the grid, so reflections occur at the boundaries of the domain immediately. For the simulation shown below, we have taken $u_0(x)=u_{\mathrm{kink}}(x,t_0)$, $\theta=\frac{2}{3}$, $k=20h$, $t_0=0$ and $t_f=20$. Moreover, we had to pick $\theta=\frac{2}{3}$ because for any $\theta\le\frac{1}{2}$ oscillations arose at the boundary, which are likely due to the scheme and not the equation, thus picking a value of $\theta$ a good bit into the "exponentially damped" spectrum of the time stepping schemes assures these oscillations are not created.

      Animation of a moving 2D kink, at 45 degrees to the axes of the grid, showing boundary effects.

      Another interesting solution to the sine-Gordon equation (which cannot be obtained analytically) can be produced by using two 1D breathers to construct the following separable 2D initial condition:

      \[
/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_26.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1683))
--- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_26.html	2023-11-25 15:26:01.619854897 +0100
+++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_26.html	2023-11-25 15:26:01.619854897 +0100
@@ -164,8 +164,8 @@
   \right].
 \end{align*}

      -

      Here, $k_n=t_n-t_{n-1}$ is the time step size. The theta-scheme generalizes the explicit Euler ( $\theta=0$), implicit Euler ( $\theta=1$) and Crank-Nicolson ( $\theta=\frac 12$) time discretizations. Since the latter has the highest convergence order, we will choose $\theta=\frac 12$ in the program below, but make it so that playing with this parameter remains simple. (If you are interested in playing with higher order methods, take a look at step-52.)

      -

      Given this time discretization, space discretization happens as it always does, by multiplying with test functions, integrating by parts, and then restricting everything to a finite dimensional subspace. This yields the following set of fully discrete equations after multiplying through with $k_n$:

      +

      Here, $k_n=t_n-t_{n-1}$ is the time step size. The theta-scheme generalizes the explicit Euler ( $\theta=0$), implicit Euler ( $\theta=1$) and Crank-Nicolson ( $\theta=\frac 12$) time discretizations. Since the latter has the highest convergence order, we will choose $\theta=\frac 12$ in the program below, but make it so that playing with this parameter remains simple. (If you are interested in playing with higher order methods, take a look at step-52.)

      +

      Given this time discretization, space discretization happens as it always does, by multiplying with test functions, integrating by parts, and then restricting everything to a finite dimensional subspace. This yields the following set of fully discrete equations after multiplying through with $k_n$:

      \begin{align*}
   M U^n-MU^{n-1}
   +
@@ -183,7 +183,7 @@
   \right],
 \end{align*}

      -

      where $M$ is the mass matrix and $A$ is the stiffness matrix that results from discretizing the Laplacian. Bringing all known quantities to the right hand side yields the linear system we have to solve in every step:

      +

      where $M$ is the mass matrix and $A$ is the stiffness matrix that results from discretizing the Laplacian. Bringing all known quantities to the right hand side yields the linear system we have to solve in every step:

      \begin{align*}
   (M
   +
@@ -209,7 +209,7 @@
 <ul>
 <li>
 <p class=Time step size and minimal mesh size: For stationary problems, the general approach is "make the mesh as fine as it is necessary". For problems with singularities, this often leads to situations where we get many levels of refinement into corners or along interfaces. The very first tutorial to use adaptive meshes, step-6, is a case in point already.

      -

      However, for time dependent problems, we typically need to choose the time step related to the mesh size. For explicit time discretizations, this is obvious, since we need to respect a CFL condition that ties the time step size to the smallest mesh size. For implicit time discretizations, no such hard restriction exists, but in practice we still want to make the time step smaller if we make the mesh size smaller since we typically have error estimates of the form $\|e\| \le {\cal O}(k^p + h^q)$ where $p,q$ are the convergence orders of the time and space discretization, respectively. We can only make the error small if we decrease both terms. Ideally, an estimate like this would suggest to choose $k \propto h^{q/p}$. Because, at least for problems with non-smooth solutions, the error is typically localized in the cells with the smallest mesh size, we have to indeed choose $k \propto h_{\text{min}}^{q/p}$, using the smallest mesh size.

      +

      However, for time dependent problems, we typically need to choose the time step related to the mesh size. For explicit time discretizations, this is obvious, since we need to respect a CFL condition that ties the time step size to the smallest mesh size. For implicit time discretizations, no such hard restriction exists, but in practice we still want to make the time step smaller if we make the mesh size smaller since we typically have error estimates of the form $\|e\| \le {\cal O}(k^p + h^q)$ where $p,q$ are the convergence orders of the time and space discretization, respectively. We can only make the error small if we decrease both terms. Ideally, an estimate like this would suggest to choose $k \propto h^{q/p}$. Because, at least for problems with non-smooth solutions, the error is typically localized in the cells with the smallest mesh size, we have to indeed choose $k \propto h_{\text{min}}^{q/p}$, using the smallest mesh size.

      The consequence is that refining the mesh further in one place implies not only the moderate additional effort of increasing the number of degrees of freedom slightly, but also the much larger effort of having to solve the global linear system more often because of the smaller time step.

      In practice, one typically deals with this by acknowledging that we can not make the time step arbitrarily small, and consequently can not make the local mesh size arbitrarily small. Rather, we set a maximal level of refinement and when we flag cells for refinement, we simply do not refine those cells whose children would exceed this maximal level of refinement.

      There is a similar problem in that we will choose a right hand side that will switch on in different parts of the domain at different times. To avoid being caught flat footed with too coarse a mesh in areas where we suddenly need a finer mesh, we will also enforce in our program a minimal mesh refinement level.

      @@ -240,7 +240,7 @@ \sum_j U^n \varphi_j(\mathbf x), \end{align*}" src="form_3618.png"/>

      -

      multiply with test functions $\varphi_i(\mathbf x)$ and integrate by parts where necessary. In a process as outlined above, this would yield

      +

      multiply with test functions $\varphi_i(\mathbf x)$ and integrate by parts where necessary. In a process as outlined above, this would yield

      \begin{align*}
     \sum_j
     (M
@@ -260,7 +260,7 @@
     \right].
   \end{align*}

      -

      Now imagine that we have changed the mesh between time steps $n-1$ and $n$. Then the problem is that the basis functions we use for $u_h^n$ and $u^{n-1}$ are different! This pertains to the terms on the right hand side, the first of which we could more clearly write as (the second follows the same pattern)

      +

      Now imagine that we have changed the mesh between time steps $n-1$ and $n$. Then the problem is that the basis functions we use for $u_h^n$ and $u^{n-1}$ are different! This pertains to the terms on the right hand side, the first of which we could more clearly write as (the second follows the same pattern)

      \begin{align*}
     (\varphi_i, u_h^{n-1})
     =
@@ -272,7 +272,7 @@
     i=1\ldots N_n.
   \end{align*}

      -

      If the meshes used in these two time steps are the same, then $(\varphi_i^n, \varphi_j^{n-1})$ forms a square mass matrix $M_{ij}$. However, if the meshes are not the same, then in general the matrix is rectangular. Worse, it is difficult to even compute these integrals because if we loop over the cells of the mesh at time step $n$, then we need to evaluate $\varphi_j^{n-1}$ at the quadrature points of these cells, but they do not necessarily correspond to the cells of the mesh at time step $n-1$ and $\varphi_j^{n-1}$ is not defined via these cells; the same of course applies if we wanted to compute the integrals via integration on the cells of mesh $n-1$.

      +

      If the meshes used in these two time steps are the same, then $(\varphi_i^n, \varphi_j^{n-1})$ forms a square mass matrix $M_{ij}$. However, if the meshes are not the same, then in general the matrix is rectangular. Worse, it is difficult to even compute these integrals because if we loop over the cells of the mesh at time step $n$, then we need to evaluate $\varphi_j^{n-1}$ at the quadrature points of these cells, but they do not necessarily correspond to the cells of the mesh at time step $n-1$ and $\varphi_j^{n-1}$ is not defined via these cells; the same of course applies if we wanted to compute the integrals via integration on the cells of mesh $n-1$.

      In any case, what we have to face is a situation where we need to integrate shape functions defined on two different meshes. This can be done, and is in fact demonstrated in step-28, but the process is at best described by the word "awkward".

      In practice, one does not typically want to do this. Rather, we avoid the whole situation by interpolating the solution from the old to the new mesh every time we adapt the mesh. In other words, rather than solving the equations above, we instead solve the problem

      \begin{align*}
@@ -294,14 +294,14 @@
     \right],
   \end{align*}

      -

      where $I_h^n$ is the interpolation operator onto the finite element space used in time step $n$. This is not the optimal approach since it introduces an additional error besides time and space discretization, but it is a pragmatic one that makes it feasible to do time adapting meshes.

      +

      where $I_h^n$ is the interpolation operator onto the finite element space used in time step $n$. This is not the optimal approach since it introduces an additional error besides time and space discretization, but it is a pragmatic one that makes it feasible to do time adapting meshes.

      What could possibly go wrong? Verifying whether the code is correct

      There are a number of things one can typically get wrong when implementing a finite element code. In particular, for time dependent problems, the following are common sources of bugs:

        -
      • The time integration, for example by getting the coefficients in front of the terms involving the current and previous time steps wrong (e.g., mixing up a factor $\theta$ for $1-\theta$).
      • -
      • Handling the right hand side, for example forgetting a factor of $k_n$ or $\theta$.
      • -
      • Mishandling the boundary values, again for example forgetting a factor of $k_n$ or $\theta$, or forgetting to apply nonzero boundary values not only to the right hand side but also to the system matrix.
      • +
      • The time integration, for example by getting the coefficients in front of the terms involving the current and previous time steps wrong (e.g., mixing up a factor $\theta$ for $1-\theta$).
      • +
      • Handling the right hand side, for example forgetting a factor of $k_n$ or $\theta$.
      • +
      • Mishandling the boundary values, again for example forgetting a factor of $k_n$ or $\theta$, or forgetting to apply nonzero boundary values not only to the right hand side but also to the system matrix.

      A less common problem is getting the initial conditions wrong because one can typically see that it is wrong by just outputting the first time step. In any case, in order to verify the correctness of the code, it is helpful to have a testing protocol that allows us to verify each of these components separately. This means:

      • Testing the code with nonzero initial conditions but zero right hand side and boundary values and verifying that the time evolution is correct.
      • @@ -337,7 +337,7 @@ \end{align*}" src="form_3637.png"/>

        In other words, if the initial condition is a product of sines, then the solution has exactly the same shape of a product of sines that decays to zero with a known time dependence. This is something that is easy to test if you have a sufficiently fine mesh and sufficiently small time step.

        -

        What is typically going to happen if you get the time integration scheme wrong (e.g., by having the wrong factors of $\theta$ or $k$ in front of the various terms) is that you don't get the right temporal behavior of the solution. Double check the various factors until you get the right behavior. You may also want to verify that the temporal decay rate (as determined, for example, by plotting the value of the solution at a fixed point) does not double or halve each time you double or halve the time step or mesh size. You know that it's not the handling of the boundary conditions or right hand side because these were both zero.

        +

        What is typically going to happen if you get the time integration scheme wrong (e.g., by having the wrong factors of $\theta$ or $k$ in front of the various terms) is that you don't get the right temporal behavior of the solution. Double check the various factors until you get the right behavior. You may also want to verify that the temporal decay rate (as determined, for example, by plotting the value of the solution at a fixed point) does not double or halve each time you double or halve the time step or mesh size. You know that it's not the handling of the boundary conditions or right hand side because these were both zero.

        If you have so verified that the time integrator is correct, take the situation where the right hand side is nonzero but the initial conditions are zero: $u_0(x,y)=0$ and $f(x,y,t)=\sin(n_x \pi x) \sin(n_x \pi y)$. Again,

        \begin{align*}
   \left(\frac{\partial}{\partial t} -\Delta\right)
@@ -360,7 +360,7 @@
   a(t) = \frac{1}{(n_x^2+n_y^2)\pi^2} \left[ 1 - e^{-(n_x^2+n_y^2)\pi^2 t} \right].
 \end{align*}

        -

        Again, if you have the wrong factors of $\theta$ or $k$ in front of the right hand side terms you will either not get the right temporal behavior of the solution, or it will converge to a maximum value other than $\frac{1}{(n_x^2+n_y^2)\pi^2}$.

        +

        Again, if you have the wrong factors of $\theta$ or $k$ in front of the right hand side terms you will either not get the right temporal behavior of the solution, or it will converge to a maximum value other than $\frac{1}{(n_x^2+n_y^2)\pi^2}$.

        Once we have verified that the time integration and right hand side handling are correct using this scheme, we can go on to verifying that we have the boundary values correct, using a very similar approach.

        The testcase

        Solving the heat equation on a simple domain with a simple right hand side almost always leads to solutions that are exceedingly boring, since they become very smooth very quickly and then do not move very much any more. Rather, we here solve the equation on the L-shaped domain with zero Dirichlet boundary values and zero initial conditions, but as right hand side we choose

        @@ -408,7 +408,7 @@ \right. \end{align*}" src="form_3646.png"/>

        -

        In other words, in every period of length $\tau$, the right hand side first flashes on in domain 1, then off completely, then on in domain 2, then off completely again. This pattern is probably best observed via the little animation of the solution shown in the results section.

        +

        In other words, in every period of length $\tau$, the right hand side first flashes on in domain 1, then off completely, then on in domain 2, then off completely again. This pattern is probably best observed via the little animation of the solution shown in the results section.

        If you interpret the heat equation as finding the spatially and temporally variable temperature distribution of a conducting solid, then the test case above corresponds to an L-shaped body where we keep the boundary at zero temperature, and heat alternatingly in two parts of the domain. While heating is in effect, the temperature rises in these places, after which it diffuses and diminishes again. The point of these initial conditions is that they provide us with a solution that has singularities both in time (when sources switch on and off) as well as space (at the reentrant corner as well as at the edges and corners of the regions where the source acts).

        The commented program

        The program starts with the usual include files, all of which you should have seen before by now:

        @@ -795,7 +795,7 @@
          system_rhs.add(-(1 - theta) * time_step, tmp);
         

        The second piece is to compute the contributions of the source terms. This corresponds to the term $k_n
-   \left[ (1-\theta)F^{n-1} + \theta F^n \right]$. The following code calls VectorTools::create_right_hand_side to compute the vectors $F$, where we set the time of the right hand side (source) function before we evaluate it. The result of this all ends up in the forcing_terms variable:

        + \left[ (1-\theta)F^{n-1} + \theta F^n \right]$" src="form_3649.png"/>. The following code calls VectorTools::create_right_hand_side to compute the vectors $F$, where we set the time of the right hand side (source) function before we evaluate it. The result of this all ends up in the forcing_terms variable:

          RightHandSide<dim> rhs_function;
          rhs_function.set_time(time);
        @@ -1004,34 +1004,34 @@

        There are two factors at play. First, there are some islands where cells have been refined but that are surrounded by non-refined cells (and there are probably also a few occasional coarsened islands). These are not terrible, as they most of the time do not affect the approximation quality of the mesh, but they also don't help because so many of their additional degrees of freedom are in fact constrained by hanging node constraints. That said, this is easy to fix: the Triangulation class takes an argument to its constructor indicating a level of "mesh smoothing". Passing one of many possible flags, this instructs the triangulation to refine some additional cells, or not to refine some cells, so that the resulting mesh does not have these artifacts.

        The second problem is more severe: the mesh appears to lag the solution. The underlying reason is that we only adapt the mesh once every fifth time step, and only allow for a single refinement in these cases. Whenever a source switches on, the solution had been very smooth in this area before and the mesh was consequently rather coarse. This implies that the next time step when we refine the mesh, we will get one refinement level more in this area, and five time steps later another level, etc. But this is not enough: first, we should refine immediately when a source switches on (after all, in the current context we at least know what the right hand side is), and we should allow for more than one refinement level. Of course, all of this can be done using deal.II, it just requires a bit of algorithmic thinking in how to make this work!

        Positivity preservation

        -

        To increase the accuracy and resolution of your simulation in time, one typically decreases the time step size $k_n$. If you start playing around with the time step in this particular example, you will notice that the solution becomes partly negative, if $k_n$ is below a certain threshold. This is not what we would expect to happen (in nature).

        +

        To increase the accuracy and resolution of your simulation in time, one typically decreases the time step size $k_n$. If you start playing around with the time step in this particular example, you will notice that the solution becomes partly negative, if $k_n$ is below a certain threshold. This is not what we would expect to happen (in nature).

        To get an idea of this behavior mathematically, let us consider a general, fully discrete problem:

        \begin{align*}
   A u^{n} = B u^{n-1}.
 \end{align*}

        -

        The general form of the $i$th equation then reads:

        +

        The general form of the $i$th equation then reads:

        \begin{align*}
   a_{ii} u^{n}_i &= b_{ii} u^{n-1}_i +
   \sum\limits_{j \in S_i} \left( b_{ij} u^{n-1}_j - a_{ij} u^{n}_j \right),
 \end{align*}

        -

        where $S_i$ is the set of degrees of freedom that DoF $i$ couples with (i.e., for which either the matrix $A$ or matrix $B$ has a nonzero entry at position $(i,j)$). If all coefficients fulfill the following conditions:

        +

        where $S_i$ is the set of degrees of freedom that DoF $i$ couples with (i.e., for which either the matrix $A$ or matrix $B$ has a nonzero entry at position $(i,j)$). If all coefficients fulfill the following conditions:

        \begin{align*}
   a_{ii} &> 0, & b_{ii} &\geq 0, & a_{ij} &\leq 0, & b_{ij} &\geq 0,
   &
   \forall j &\in S_i,
 \end{align*}

        -

        all solutions $u^{n}$ keep their sign from the previous ones $u^{n-1}$, and consequently from the initial values $u^0$. See e.g. Kuzmin, Hämäläinen for more information on positivity preservation.

        -

        Depending on the PDE to solve and the time integration scheme used, one is able to deduce conditions for the time step $k_n$. For the heat equation with the Crank-Nicolson scheme, Schatz et al. have translated it to the following ones:

        +

        all solutions $u^{n}$ keep their sign from the previous ones $u^{n-1}$, and consequently from the initial values $u^0$. See e.g. Kuzmin, Hämäläinen for more information on positivity preservation.

        +

        Depending on the PDE to solve and the time integration scheme used, one is able to deduce conditions for the time step $k_n$. For the heat equation with the Crank-Nicolson scheme, Schatz et al. have translated it to the following ones:

        \begin{align*}
   (1 - \theta) k a_{ii} &\leq m_{ii},\qquad \forall i,
   &
   \theta k \left| a_{ij} \right| &\geq m_{ij},\qquad j \neq i,
 \end{align*}

        -

        where $M = m_{ij}$ denotes the mass matrix and $A = a_{ij}$ the stiffness matrix with $a_{ij} \leq 0$ for $j \neq i$, respectively. With $a_{ij} \leq 0$, we can formulate bounds for the global time step $k$ as follows:

        +

        where $M = m_{ij}$ denotes the mass matrix and $A = a_{ij}$ the stiffness matrix with $a_{ij} \leq 0$ for $j \neq i$, respectively. With $a_{ij} \leq 0$, we can formulate bounds for the global time step $k$ as follows:

        \begin{align*}
   k_{\text{max}} &= \frac{ 1 }{ 1 - \theta }
   \min\left( \frac{ m_{ii} }{ a_{ii} } \right),~ \forall i,
/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_27.html differs (JavaScript source, ASCII text, with very long lines (2713))
--- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_27.html	2023-11-25 15:26:01.646521022 +0100
+++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_27.html	2023-11-25 15:26:01.646521022 +0100
@@ -143,8 +143,8 @@
   <li> <a href=The plain program

      Introduction

      -

      This tutorial program attempts to show how to use $hp$-finite element methods with deal.II. It solves the Laplace equation and so builds only on the first few tutorial programs, in particular on step-4 for dimension independent programming and step-6 for adaptive mesh refinement.

      -

      The $hp$-finite element method was proposed in the early 1980s by Babuška and Guo as an alternative to either (i) mesh refinement (i.e., decreasing the mesh parameter $h$ in a finite element computation) or (ii) increasing the polynomial degree $p$ used for shape functions. It is based on the observation that increasing the polynomial degree of the shape functions reduces the approximation error if the solution is sufficiently smooth. On the other hand, it is well known that even for the generally well-behaved class of elliptic problems, higher degrees of regularity can not be guaranteed in the vicinity of boundaries, corners, or where coefficients are discontinuous; consequently, the approximation can not be improved in these areas by increasing the polynomial degree $p$ but only by refining the mesh, i.e., by reducing the mesh size $h$. These differing means to reduce the error have led to the notion of $hp$-finite elements, where the approximating finite element spaces are adapted to have a high polynomial degree $p$ wherever the solution is sufficiently smooth, while the mesh width $h$ is reduced at places wherever the solution lacks regularity. It was already realized in the first papers on this method that $hp$-finite elements can be a powerful tool that can guarantee that the error is reduced not only with some negative power of the number of degrees of freedom, but in fact exponentially.

      +

      This tutorial program attempts to show how to use $hp$-finite element methods with deal.II. It solves the Laplace equation and so builds only on the first few tutorial programs, in particular on step-4 for dimension independent programming and step-6 for adaptive mesh refinement.

      +

      The $hp$-finite element method was proposed in the early 1980s by Babuška and Guo as an alternative to either (i) mesh refinement (i.e., decreasing the mesh parameter $h$ in a finite element computation) or (ii) increasing the polynomial degree $p$ used for shape functions. It is based on the observation that increasing the polynomial degree of the shape functions reduces the approximation error if the solution is sufficiently smooth. On the other hand, it is well known that even for the generally well-behaved class of elliptic problems, higher degrees of regularity can not be guaranteed in the vicinity of boundaries, corners, or where coefficients are discontinuous; consequently, the approximation can not be improved in these areas by increasing the polynomial degree $p$ but only by refining the mesh, i.e., by reducing the mesh size $h$. These differing means to reduce the error have led to the notion of $hp$-finite elements, where the approximating finite element spaces are adapted to have a high polynomial degree $p$ wherever the solution is sufficiently smooth, while the mesh width $h$ is reduced at places wherever the solution lacks regularity. It was already realized in the first papers on this method that $hp$-finite elements can be a powerful tool that can guarantee that the error is reduced not only with some negative power of the number of degrees of freedom, but in fact exponentially.

      In order to implement this method, we need several things above and beyond what a usual finite element program needs, and in particular above what we have introduced in the tutorial programs leading up to step-6. In particular, we will have to discuss the following aspects:

      • Instead of using the same finite element on all cells, we now will want a collection of finite element objects, and associate each cell with one of these objects in this collection.

        @@ -162,11 +162,11 @@ After solving the resulting linear system, we will want to analyze the solution. In particular, we will want to compute error indicators that tell us whether a given cell should be refined and/or whether the polynomial degree of the shape functions used on it should be increased.

      We will discuss all these aspects in the following subsections of this introduction. It will not come as a big surprise that most of these tasks are already well supported by functionality provided by the deal.II, and that we will only have to provide the logic of what the program should do, not exactly how all this is going to happen.

      -

      In deal.II, the $hp$-functionality is largely packaged into the hp-namespace. This namespace provides classes that handle $hp$-discretizations, assembling matrices and vectors, and other tasks. We will get to know many of them further down below. In addition, most of the functions in the DoFTools, and VectorTools namespaces accept $hp$-objects in addition to the non- $hp$-ones. Much of the $hp$-implementation is also discussed in the hp-finite element support documentation module and the links found there.

      -

      It may be worth giving a slightly larger perspective at the end of this first part of the introduction. $hp$-functionality has been implemented in a number of different finite element packages (see, for example, the list of references cited in the hp-paper). However, by and large, most of these packages have implemented it only for the (i) the 2d case, and/or (ii) the discontinuous Galerkin method. The latter is a significant simplification because discontinuous finite elements by definition do not require continuity across faces between cells and therefore do not require the special treatment otherwise necessary whenever finite elements of different polynomial degree meet at a common face. In contrast, deal.II implements the most general case, i.e., it allows for continuous and discontinuous elements in 1d, 2d, and 3d, and automatically handles the resulting complexity. In particular, it handles computing the constraints (similar to hanging node constraints) of elements of different degree meeting at a face or edge. The many algorithmic and data structure techniques necessary for this are described in the hp-paper for those interested in such detail.

      -

      We hope that providing such a general implementation will help explore the potential of $hp$-methods further.

      +

      In deal.II, the $hp$-functionality is largely packaged into the hp-namespace. This namespace provides classes that handle $hp$-discretizations, assembling matrices and vectors, and other tasks. We will get to know many of them further down below. In addition, most of the functions in the DoFTools, and VectorTools namespaces accept $hp$-objects in addition to the non- $hp$-ones. Much of the $hp$-implementation is also discussed in the hp-finite element support documentation module and the links found there.

      +

      It may be worth giving a slightly larger perspective at the end of this first part of the introduction. $hp$-functionality has been implemented in a number of different finite element packages (see, for example, the list of references cited in the hp-paper). However, by and large, most of these packages have implemented it only for the (i) the 2d case, and/or (ii) the discontinuous Galerkin method. The latter is a significant simplification because discontinuous finite elements by definition do not require continuity across faces between cells and therefore do not require the special treatment otherwise necessary whenever finite elements of different polynomial degree meet at a common face. In contrast, deal.II implements the most general case, i.e., it allows for continuous and discontinuous elements in 1d, 2d, and 3d, and automatically handles the resulting complexity. In particular, it handles computing the constraints (similar to hanging node constraints) of elements of different degree meeting at a face or edge. The many algorithmic and data structure techniques necessary for this are described in the hp-paper for those interested in such detail.

      +

      We hope that providing such a general implementation will help explore the potential of $hp$-methods further.

      Finite element collections

      -

      Now on again to the details of how to use the $hp$-functionality in deal.II. The first aspect we have to deal with is that now we do not have only a single finite element any more that is used on all cells, but a number of different elements that cells can choose to use. For this, deal.II introduces the concept of a finite element collection, implemented in the class hp::FECollection. In essence, such a collection acts like an object of type std::vector<FiniteElement>, but with a few more bells and whistles and a memory management better suited to the task at hand. As we will later see, we will also use similar quadrature collections, and — although we don't use them here — there is also the concept of mapping collections. All of these classes are described in the hp-Collections overview.

      +

      Now on again to the details of how to use the $hp$-functionality in deal.II. The first aspect we have to deal with is that now we do not have only a single finite element any more that is used on all cells, but a number of different elements that cells can choose to use. For this, deal.II introduces the concept of a finite element collection, implemented in the class hp::FECollection. In essence, such a collection acts like an object of type std::vector<FiniteElement>, but with a few more bells and whistles and a memory management better suited to the task at hand. As we will later see, we will also use similar quadrature collections, and — although we don't use them here — there is also the concept of mapping collections. All of these classes are described in the hp-Collections overview.

      In this tutorial program, we will use continuous Lagrange elements of orders 2 through 7 (in 2d) or 2 through 5 (in 3d). The collection of used elements can then be created as follows:

      hp::FECollection<dim> fe_collection;
      for (unsigned int degree = 2; degree <= max_degree; ++degree)
      fe_collection.push_back(FE_Q<dim>(degree));
      @@ -183,16 +183,16 @@
      dof_handler.distribute_dofs(fe_collection);
      const ::parallel::distributed::Triangulation< dim, spacedim > * triangulation
      -

      Dots in the call to set_active_fe_index() indicate that we will have to have some sort of strategy later on to decide which element to use on which cell; we will come back to this later. The main point here is that the first and last line of this code snippet is pretty much exactly the same as for the non- $hp$-case.

      +

      Dots in the call to set_active_fe_index() indicate that we will have to have some sort of strategy later on to decide which element to use on which cell; we will come back to this later. The main point here is that the first and last line of this code snippet is pretty much exactly the same as for the non- $hp$-case.

      Another complication arises from the fact that this time we do not simply have hanging nodes from local mesh refinement, but we also have to deal with the case that if there are two cells with different active finite element indices meeting at a face (for example a Q2 and a Q3 element) then we have to compute additional constraints on the finite element field to ensure that it is continuous. This is conceptually very similar to how we compute hanging node constraints, and in fact the code looks exactly the same:

      DoFTools::make_hanging_node_constraints(dof_handler, constraints);
      void make_hanging_node_constraints(const DoFHandler< dim, spacedim > &dof_handler, AffineConstraints< number > &constraints)
      -

      In other words, the DoFTools::make_hanging_node_constraints deals not only with hanging node constraints, but also with $hp$-constraints at the same time.

      +

      In other words, the DoFTools::make_hanging_node_constraints deals not only with hanging node constraints, but also with $hp$-constraints at the same time.

      Assembling matrices and vectors with hp-objects

      -

      Following this, we have to set up matrices and vectors for the linear system of the correct size and assemble them. Setting them up works in exactly the same way as for the non- $hp$-case. Assembling requires a bit more thought.

      +

      Following this, we have to set up matrices and vectors for the linear system of the correct size and assemble them. Setting them up works in exactly the same way as for the non- $hp$-case. Assembling requires a bit more thought.

      The main idea is of course unchanged: we have to loop over all cells, assemble local contributions, and then copy them into the global objects. As discussed in some detail first in step-3, deal.II has the FEValues class that pulls the finite element description, mapping, and quadrature formula together and aids in evaluating values and gradients of shape functions as well as other information on each of the quadrature points mapped to the real location of a cell. Every time we move on to a new cell we re-initialize this FEValues object, thereby asking it to re-compute that part of the information that changes from cell to cell. It can then be used to sum up local contributions to bilinear form and right hand side.

      -

      In the context of $hp$-finite element methods, we have to deal with the fact that we do not use the same finite element object on each cell. In fact, we should not even use the same quadrature object for all cells, but rather higher order quadrature formulas for cells where we use higher order finite elements. Similarly, we may want to use higher order mappings on such cells as well.

      +

      In the context of $hp$-finite element methods, we have to deal with the fact that we do not use the same finite element object on each cell. In fact, we should not even use the same quadrature object for all cells, but rather higher order quadrature formulas for cells where we use higher order finite elements. Similarly, we may want to use higher order mappings on such cells as well.

      To facilitate these considerations, deal.II has a class hp::FEValues that does what we need in the current context. The difference is that instead of a single finite element, quadrature formula, and mapping, it takes collections of these objects. It's use is very much like the regular FEValues class, i.e., the interesting part of the loop over all cells would look like this:

      hp::FEValues<dim> hp_fe_values(mapping_collection,
      fe_collection,
      @@ -216,15 +216,15 @@
      @ update_gradients
      Shape function gradients.
      @ update_quadrature_points
      Transformed quadrature points.

      In this tutorial program, we will always use a Q1 mapping, so the mapping collection argument to the hp::FEValues construction will be omitted. Inside the loop, we first initialize the hp::FEValues object for the current cell. The second, third and fourth arguments denote the index within their respective collections of the quadrature, mapping, and finite element objects we wish to use on this cell. These arguments can be omitted (and are in the program below), in which case cell->active_fe_index() is used for this index. The order of these arguments is chosen in this way because one may sometimes want to pick a different quadrature or mapping object from their respective collections, but hardly ever a different finite element than the one in use on this cell, i.e., one with an index different from cell->active_fe_index(). The finite element collection index is therefore the last default argument so that it can be conveniently omitted.

      -

      What this reinit call does is the following: the hp::FEValues class checks whether it has previously already allocated a non- $hp$-FEValues object for this combination of finite element, quadrature, and mapping objects. If not, it allocates one. It then re-initializes this object for the current cell, after which there is now a FEValues object for the selected finite element, quadrature and mapping usable on the current cell. A reference to this object is then obtained using the call hp_fe_values.get_present_fe_values(), and will be used in the usual fashion to assemble local contributions.

      +

      What this reinit call does is the following: the hp::FEValues class checks whether it has previously already allocated a non- $hp$-FEValues object for this combination of finite element, quadrature, and mapping objects. If not, it allocates one. It then re-initializes this object for the current cell, after which there is now a FEValues object for the selected finite element, quadrature and mapping usable on the current cell. A reference to this object is then obtained using the call hp_fe_values.get_present_fe_values(), and will be used in the usual fashion to assemble local contributions.

      A simple indicator for hp-refinement and estimating smoothness

      One of the central pieces of the adaptive finite element method is that we inspect the computed solution (a posteriori) with an indicator that tells us which are the cells where the error is largest, and then refine them. In many of the other tutorial programs, we use the KellyErrorEstimator class to get an indication of the size of the error on a cell, although we also discuss more complicated strategies in some programs, most importantly in step-14.

      In any case, as long as the decision is only "refine this cell" or "do not -refine this cell", the actual refinement step is not particularly challenging. However, here we have a code that is capable of hp-refinement, i.e., we suddenly have two choices whenever we detect that the error on a certain cell is too large for our liking: we can refine the cell by splitting it into several smaller ones, or we can increase the polynomial degree of the shape functions used on it. How do we know which is the more promising strategy? Answering this question is the central problem in $hp$-finite element research at the time of this writing.

      -

      In short, the question does not appear to be settled in the literature at this time. There are a number of more or less complicated schemes that address it, but there is nothing like the KellyErrorEstimator that is universally accepted as a good, even if not optimal, indicator of the error. Most proposals use the fact that it is beneficial to increase the polynomial degree whenever the solution is locally smooth whereas it is better to refine the mesh wherever it is rough. However, the questions of how to determine the local smoothness of the solution as well as the decision when a solution is smooth enough to allow for an increase in $p$ are certainly big and important ones.

      -

      In the following, we propose a simple estimator of the local smoothness of a solution. As we will see in the results section, this estimator has flaws, in particular as far as cells with local hanging nodes are concerned. We therefore do not intend to present the following ideas as a complete solution to the problem. Rather, it is intended as an idea to approach it that merits further research and investigation. In other words, we do not intend to enter a sophisticated proposal into the fray about answers to the general question. However, to demonstrate our approach to $hp$-finite elements, we need a simple indicator that does generate some useful information that is able to drive the simple calculations this tutorial program will perform.

      +refine this cell", the actual refinement step is not particularly challenging. However, here we have a code that is capable of hp-refinement, i.e., we suddenly have two choices whenever we detect that the error on a certain cell is too large for our liking: we can refine the cell by splitting it into several smaller ones, or we can increase the polynomial degree of the shape functions used on it. How do we know which is the more promising strategy? Answering this question is the central problem in $hp$-finite element research at the time of this writing.

      +

      In short, the question does not appear to be settled in the literature at this time. There are a number of more or less complicated schemes that address it, but there is nothing like the KellyErrorEstimator that is universally accepted as a good, even if not optimal, indicator of the error. Most proposals use the fact that it is beneficial to increase the polynomial degree whenever the solution is locally smooth whereas it is better to refine the mesh wherever it is rough. However, the questions of how to determine the local smoothness of the solution as well as the decision when a solution is smooth enough to allow for an increase in $p$ are certainly big and important ones.

      +

      In the following, we propose a simple estimator of the local smoothness of a solution. As we will see in the results section, this estimator has flaws, in particular as far as cells with local hanging nodes are concerned. We therefore do not intend to present the following ideas as a complete solution to the problem. Rather, it is intended as an idea to approach it that merits further research and investigation. In other words, we do not intend to enter a sophisticated proposal into the fray about answers to the general question. However, to demonstrate our approach to $hp$-finite elements, we need a simple indicator that does generate some useful information that is able to drive the simple calculations this tutorial program will perform.

      The idea

      -

      Our approach here is simple: for a function $u({\bf x})$ to be in the Sobolev space $H^s(K)$ on a cell $K$, it has to satisfy the condition

      +

      Our approach here is simple: for a function $u({\bf x})$ to be in the Sobolev space $H^s(K)$ on a cell $K$, it has to satisfy the condition

      \[
    \int_K |\nabla^s u({\bf x})|^2 \; d{\bf x} < \infty.
 \] @@ -240,7 +240,7 @@ = \sum_{\bf k} \hat U_{\bf k}\,e^{-i {\bf k}\cdot \hat{\bf x}}, \]" src="form_3666.png"/>

      -

      with Fourier vectors ${\bf k}=(k_x,k_y)$ in 2d, ${\bf k}=(k_x,k_y,k_z)$ in 3d, etc, and $k_x,k_y,k_z=0,2\pi,4\pi,\ldots$. The coefficients of expansion $\hat U_{\bf k}$ can be obtained using $L^2$-orthogonality of the exponential basis

      +

      with Fourier vectors ${\bf k}=(k_x,k_y)$ in 2d, ${\bf k}=(k_x,k_y,k_z)$ in 3d, etc, and $k_x,k_y,k_z=0,2\pi,4\pi,\ldots$. The coefficients of expansion $\hat U_{\bf k}$ can be obtained using $L^2$-orthogonality of the exponential basis

      \[
 \int_{\hat K} e^{-i {\bf m}\cdot \hat{\bf x}} e^{i {\bf n}\cdot \hat{\bf x}} d\hat{\bf x} = \delta_{\bf m \bf n},
 \] @@ -270,9 +270,9 @@ |\hat U_{\bf k}| = {\cal O}\left(|{\bf k}|^{-\left(s+1/2+\frac{d-1}{2}+\epsilon\right)}\right). \]" src="form_3676.png"/>

      -

      Put differently: the higher regularity $s$ we want, the faster the Fourier coefficients have to go to zero. If you wonder where the additional exponent $\frac{d-1}2$ comes from: we would like to make use of the fact that $\sum_l a_l < \infty$ if the sequence $a_l =
-{\cal O}(l^{-1-\epsilon})$ for any $\epsilon>0$. The problem is that we here have a summation not only over a single variable, but over all the integer multiples of $2\pi$ that are located inside the $d$-dimensional sphere, because we have vector components $k_x, k_y,
-\ldots$. In the same way as we prove that the sequence $a_l$ above converges by replacing the sum by an integral over the entire line, we can replace our $d$-dimensional sum by an integral over $d$-dimensional space. Now we have to note that between distance $|{\bf k}|$ and $|{\bf k}|+d|{\bf k}|$, there are, up to a constant, $|{\bf k}|^{d-1}$ modes, in much the same way as we can transform the volume element $dx\;dy$ into $2\pi r\; dr$. Consequently, it is no longer $|{\bf k}|^{2s}|\hat
+<p> Put differently: the higher regularity <picture><source srcset=$s$ we want, the faster the Fourier coefficients have to go to zero. If you wonder where the additional exponent $\frac{d-1}2$ comes from: we would like to make use of the fact that $\sum_l a_l < \infty$ if the sequence $a_l =
+{\cal O}(l^{-1-\epsilon})$ for any $\epsilon>0$. The problem is that we here have a summation not only over a single variable, but over all the integer multiples of $2\pi$ that are located inside the $d$-dimensional sphere, because we have vector components $k_x, k_y,
+\ldots$. In the same way as we prove that the sequence $a_l$ above converges by replacing the sum by an integral over the entire line, we can replace our $d$-dimensional sum by an integral over $d$-dimensional space. Now we have to note that between distance $|{\bf k}|$ and $|{\bf k}|+d|{\bf k}|$, there are, up to a constant, $|{\bf k}|^{d-1}$ modes, in much the same way as we can transform the volume element $dx\;dy$ into $2\pi r\; dr$. Consequently, it is no longer $|{\bf k}|^{2s}|\hat
 U_{\bf k}|^2$ that has to decay as ${\cal O}(|{\bf k}|^{-1-\epsilon})$, but it is in fact $|{\bf k}|^{2s}|\hat U_{\bf k}|^2 |{\bf k}|^{d-1}$. A comparison of exponents yields the result.

      We can turn this around: Assume we are given a function $\hat u$ of unknown smoothness. Let us compute its Fourier coefficients $\hat U_{\bf k}$ and see how fast they decay. If they decay as

      \[
@@ -281,7 +281,7 @@
 </p>
 <p> then consequently the function we had here was in <picture><source srcset=$H^{\mu-d/2}$.

      What we have to do

      -

      So what do we have to do to estimate the local smoothness of $u({\bf x})$ on a cell $K$? Clearly, the first step is to compute the Fourier coefficients of our solution. Fourier series being infinite series, we simplify our task by only computing the first few terms of the series, such that $|{\bf k}|\le 2\pi N$ with a cut-off $N$. Let us parenthetically remark that we want to choose $N$ large enough so that we capture at least the variation of those shape functions that vary the most. On the other hand, we should not choose $N$ too large: clearly, a finite element function, being a polynomial, is in $C^\infty$ on any given cell, so the coefficients will have to decay exponentially at one point; since we want to estimate the smoothness of the function this polynomial approximates, not of the polynomial itself, we need to choose a reasonable cutoff for $N$. Either way, computing this series is not particularly hard: from the definition

      +

      So what do we have to do to estimate the local smoothness of $u({\bf x})$ on a cell $K$? Clearly, the first step is to compute the Fourier coefficients of our solution. Fourier series being infinite series, we simplify our task by only computing the first few terms of the series, such that $|{\bf k}|\le 2\pi N$ with a cut-off $N$. Let us parenthetically remark that we want to choose $N$ large enough so that we capture at least the variation of those shape functions that vary the most. On the other hand, we should not choose $N$ too large: clearly, a finite element function, being a polynomial, is in $C^\infty$ on any given cell, so the coefficients will have to decay exponentially at one point; since we want to estimate the smoothness of the function this polynomial approximates, not of the polynomial itself, we need to choose a reasonable cutoff for $N$. Either way, computing this series is not particularly hard: from the definition

      \[
    \hat U_{\bf k}
    = \int_{\hat K} e^{i {\bf k}\cdot \hat{\bf x}} \hat u(\hat{\bf x}) d\hat{\bf x}
@@ -296,7 +296,7 @@
    d\hat{\bf x} \right] u_i,
 \]

      -

      where $u_i$ is the value of the $i$th degree of freedom on this cell. In other words, we can write it as a matrix-vector product

      +

      where $u_i$ is the value of the $i$th degree of freedom on this cell. In other words, we can write it as a matrix-vector product

      \[
    \hat U_{\bf k}
    = {\cal F}_{{\bf k},j} u_j,
@@ -309,7 +309,7 @@
    \int_{\hat K} e^{i {\bf k}\cdot \hat{\bf x}} \hat \varphi_j(\hat{\bf x}) d\hat{\bf x}.
 \]

      -

      This matrix is easily computed for a given number of shape functions $\varphi_j$ and Fourier modes $N$. Consequently, finding the coefficients $\hat U_{\bf k}$ is a rather trivial job. To simplify our life even further, we will use FESeries::Fourier class which does exactly this.

      +

      This matrix is easily computed for a given number of shape functions $\varphi_j$ and Fourier modes $N$. Consequently, finding the coefficients $\hat U_{\bf k}$ is a rather trivial job. To simplify our life even further, we will use FESeries::Fourier class which does exactly this.

      The next task is that we have to estimate how fast these coefficients decay with $|{\bf k}|$. The problem is that, of course, we have only finitely many of these coefficients in the first place. In other words, the best we can do is to fit a function $\alpha |{\bf k}|^{-\mu}$ to our data points $\hat U_{\bf k}$, for example by determining $\alpha,\mu$ via a least-squares procedure:

      \[
    \min_{\alpha,\mu}
@@ -333,7 +333,7 @@
    \left( \ln |\hat U_{\bf k}| - \beta + \mu \ln |{\bf k}|\right)^2,
 \]

      -

      where $\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0,
+<p> where <picture><source srcset=$\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0,
 \frac{\partial Q}{\partial\mu}=0$, are linear in $\beta,\mu$. We can write these conditions as follows:

      \[
    \left(\begin{array}{cc}
@@ -392,11 +392,11 @@
    }.
 \]

      -

      This is nothing else but linear regression fit and to do that we will use FESeries::linear_regression(). While we are not particularly interested in the actual value of $\beta$, the formula above gives us a mean to calculate the value of the exponent $\mu$ that we can then use to determine that $\hat u(\hat{\bf x})$ is in $H^s(\hat K)$ with $s=\mu-\frac d2$.

      +

      This is nothing else but linear regression fit and to do that we will use FESeries::linear_regression(). While we are not particularly interested in the actual value of $\beta$, the formula above gives us a mean to calculate the value of the exponent $\mu$ that we can then use to determine that $\hat u(\hat{\bf x})$ is in $H^s(\hat K)$ with $s=\mu-\frac d2$.

      These steps outlined above are applicable to many different scenarios, which motivated the introduction of a generic function SmoothnessEstimator::Fourier::coefficient_decay() in deal.II, that combines all the tasks described in this section in one simple function call. We will use it in the implementation of this program.

      Compensating for anisotropy

      In the formulas above, we have derived the Fourier coefficients $\hat U_{\bf
-k}$. Because ${\bf k}$ is a vector, we will get a number of Fourier coefficients $\hat U_{{\bf k}}$ for the same absolute value $|{\bf k}|$, corresponding to the Fourier transform in different directions. If we now consider a function like $|x|y^2$ then we will find lots of large Fourier coefficients in $x$-direction because the function is non-smooth in this direction, but fast-decaying Fourier coefficients in $y$-direction because the function is smooth there. The question that arises is this: if we simply fit our polynomial decay $\alpha |{\bf k}|^\mu$ to all Fourier coefficients, we will fit it to a smoothness averaged in all spatial directions. Is this what we want? Or would it be better to only consider the largest coefficient $\hat U_{{\bf k}}$ for all ${\bf k}$ with the same magnitude, essentially trying to determine the smoothness of the solution in that spatial direction in which the solution appears to be roughest?

      +k}$" src="form_3710.png"/>. Because ${\bf k}$ is a vector, we will get a number of Fourier coefficients $\hat U_{{\bf k}}$ for the same absolute value $|{\bf k}|$, corresponding to the Fourier transform in different directions. If we now consider a function like $|x|y^2$ then we will find lots of large Fourier coefficients in $x$-direction because the function is non-smooth in this direction, but fast-decaying Fourier coefficients in $y$-direction because the function is smooth there. The question that arises is this: if we simply fit our polynomial decay $\alpha |{\bf k}|^\mu$ to all Fourier coefficients, we will fit it to a smoothness averaged in all spatial directions. Is this what we want? Or would it be better to only consider the largest coefficient $\hat U_{{\bf k}}$ for all ${\bf k}$ with the same magnitude, essentially trying to determine the smoothness of the solution in that spatial direction in which the solution appears to be roughest?

      One can probably argue for either case. The issue would be of more interest if deal.II had the ability to use anisotropic finite elements, i.e., ones that use different polynomial degrees in different spatial directions, as they would be able to exploit the directionally variable smoothness much better. Alas, this capability does not exist at the time of writing this tutorial program.

      Either way, because we only have isotopic finite element classes, we adopt the viewpoint that we should tailor the polynomial degree to the lowest amount of regularity, in order to keep numerical efforts low. Consequently, instead of using the formula

      \[
@@ -417,7 +417,7 @@
    }.
 \]

      -

      To calculate $\mu$ as shown above, we have to slightly modify all sums: instead of summing over all Fourier modes, we only sum over those for which the Fourier coefficient is the largest one among all $\hat U_{{\bf k}}$ with the same magnitude $|{\bf k}|$, i.e., all sums above have to replaced by the following sums:

      +

      To calculate $\mu$ as shown above, we have to slightly modify all sums: instead of summing over all Fourier modes, we only sum over those for which the Fourier coefficient is the largest one among all $\hat U_{{\bf k}}$ with the same magnitude $|{\bf k}|$, i.e., all sums above have to replaced by the following sums:

      \[
   \sum_{{\bf k}, |{\bf k}|\le N}
   \longrightarrow
@@ -433,7 +433,7 @@
    \ln |\hat U_{{\bf k}}| - \beta + \mu \ln |{\bf k}|.
 \]

      -

      To compensate for the transformation means not attempting to fit a decay $|{\bf k}|^\mu$ with respect to the Fourier frequencies ${\bf k}$ on the unit cell, but to fit the coefficients $\hat U_{{\bf k}}$ computed on the reference cell to the Fourier frequencies on the real cell $|\bf
+<p> To compensate for the transformation means not attempting to fit a decay <picture><source srcset=$|{\bf k}|^\mu$ with respect to the Fourier frequencies ${\bf k}$ on the unit cell, but to fit the coefficients $\hat U_{{\bf k}}$ computed on the reference cell to the Fourier frequencies on the real cell $|\bf
 k|h$, where $h$ is the norm of the transformation operator (i.e., something like the diameter of the cell). In other words, we would have to minimize the sum of squares of the terms

      \[
    \ln |\hat U_{{\bf k}}| - \beta + \mu \ln (|{\bf k}|h).
@@ -444,10 +444,10 @@
    \ln |\hat U_{{\bf k}}| - (\beta - \mu \ln h) + \mu \ln (|{\bf k}|).
 \]

      -

      In other words, this and the original least squares problem will produce the same best-fit exponent $\mu$, though the offset will in one case be $\beta$ and in the other $\beta-\mu \ln h$. However, since we are not interested in the offset at all but only in the exponent, it doesn't matter whether we scale Fourier frequencies in order to account for mesh size effects or not, the estimated smoothness exponent will be the same in either case.

      +

      In other words, this and the original least squares problem will produce the same best-fit exponent $\mu$, though the offset will in one case be $\beta$ and in the other $\beta-\mu \ln h$. However, since we are not interested in the offset at all but only in the exponent, it doesn't matter whether we scale Fourier frequencies in order to account for mesh size effects or not, the estimated smoothness exponent will be the same in either case.

      Complications with linear systems for hp-discretizations

      Creating the sparsity pattern

      -

      One of the problems with $hp$-methods is that the high polynomial degree of shape functions together with the large number of constrained degrees of freedom leads to matrices with large numbers of nonzero entries in some rows. At the same time, because there are areas where we use low polynomial degree and consequently matrix rows with relatively few nonzero entries. Consequently, allocating the sparsity pattern for these matrices is a challenge: we cannot simply assemble a SparsityPattern by starting with an estimate of the bandwidth without using a lot of extra memory.

      +

      One of the problems with $hp$-methods is that the high polynomial degree of shape functions together with the large number of constrained degrees of freedom leads to matrices with large numbers of nonzero entries in some rows. At the same time, because there are areas where we use low polynomial degree and consequently matrix rows with relatively few nonzero entries. Consequently, allocating the sparsity pattern for these matrices is a challenge: we cannot simply assemble a SparsityPattern by starting with an estimate of the bandwidth without using a lot of extra memory.

      The way in which we create a SparsityPattern for the underlying linear system is tightly coupled to the strategy we use to enforce constraints. deal.II supports handling constraints in linear systems in two ways:

      1. Assembling the matrix without regard to the constraints and applying them afterwards with AffineConstraints::condense, or
      2. @@ -457,8 +457,8 @@

        Most programs built on deal.II use the DoFTools::make_sparsity_pattern function to allocate a DynamicSparsityPattern that takes constraints into account. The system matrix then uses a SparsityPattern copied over from the DynamicSparsityPattern. This method is explained in step-2 and used in most tutorial programs.

        The early tutorial programs use first or second degree finite elements, so removing entries in the sparsity pattern corresponding to constrained degrees of freedom does not have a large impact on the overall number of zeros explicitly stored by the matrix. However, since as many as a third of the degrees of freedom may be constrained in an hp-discretization (and, with higher degree elements, these constraints can couple one DoF to as many as ten or twenty other DoFs), it is worthwhile to take these constraints into consideration since the resulting matrix will be much sparser (and, therefore, matrix-vector products or factorizations will be substantially faster too).

        Eliminating constrained degrees of freedom

        -

        A second problem particular to $hp$-methods arises because we have so many constrained degrees of freedom: typically up to about one third of all degrees of freedom (in 3d) are constrained because they either belong to cells with hanging nodes or because they are on cells adjacent to cells with a higher or lower polynomial degree. This is, in fact, not much more than the fraction of constrained degrees of freedom in non- $hp$-mode, but the difference is that each constrained hanging node is constrained not only against the two adjacent degrees of freedom, but is constrained against many more degrees of freedom.

        -

        It turns out that the strategy presented first in step-6 to eliminate the constraints while computing the element matrices and vectors with AffineConstraints::distribute_local_to_global is the most efficient approach also for this case. The alternative strategy to first build the matrix without constraints and then "condensing" away constrained degrees of freedom is considerably more expensive. It turns out that building the sparsity pattern by this inefficient algorithm requires at least ${\cal O}(N \log N)$ in the number of unknowns, whereas an ideal finite element program would of course only have algorithms that are linear in the number of unknowns. Timing the sparsity pattern creation as well as the matrix assembly shows that the algorithm presented in step-6 (and used in the code below) is indeed faster.

        +

        A second problem particular to $hp$-methods arises because we have so many constrained degrees of freedom: typically up to about one third of all degrees of freedom (in 3d) are constrained because they either belong to cells with hanging nodes or because they are on cells adjacent to cells with a higher or lower polynomial degree. This is, in fact, not much more than the fraction of constrained degrees of freedom in non- $hp$-mode, but the difference is that each constrained hanging node is constrained not only against the two adjacent degrees of freedom, but is constrained against many more degrees of freedom.

        +

        It turns out that the strategy presented first in step-6 to eliminate the constraints while computing the element matrices and vectors with AffineConstraints::distribute_local_to_global is the most efficient approach also for this case. The alternative strategy to first build the matrix without constraints and then "condensing" away constrained degrees of freedom is considerably more expensive. It turns out that building the sparsity pattern by this inefficient algorithm requires at least ${\cal O}(N \log N)$ in the number of unknowns, whereas an ideal finite element program would of course only have algorithms that are linear in the number of unknowns. Timing the sparsity pattern creation as well as the matrix assembly shows that the algorithm presented in step-6 (and used in the code below) is indeed faster.

        In our program, we will also treat the boundary conditions as (possibly inhomogeneous) constraints and eliminate the matrix rows and columns to those as well. All we have to do for this is to call the function that interpolates the Dirichlet boundary conditions already in the setup phase in order to tell the AffineConstraints object about them, and then do the transfer from local to global data on matrix and vector simultaneously. This is exactly what we've shown in step-6.

        The test case

        The test case we will solve with this program is a re-take of the one we already look at in step-14: we solve the Laplace equation

        @@ -466,8 +466,8 @@ -\Delta u = f \]" src="form_3722.png"/>

        -

        in 2d, with $f=(x+1)(y+1)$, and with zero Dirichlet boundary values for $u$. We do so on the domain $[-1,1]^2\backslash[-\frac 12,\frac 12]^2$, i.e., a square with a square hole in the middle.

        -

        The difference to step-14 is of course that we use $hp$-finite elements for the solution. The test case is of interest because it has re-entrant corners in the corners of the hole, at which the solution has singularities. We therefore expect that the solution will be smooth in the interior of the domain, and rough in the vicinity of the singularities. The hope is that our refinement and smoothness indicators will be able to see this behavior and refine the mesh close to the singularities, while the polynomial degree is increased away from it. As we will see in the results section, this is indeed the case.

        +

        in 2d, with $f=(x+1)(y+1)$, and with zero Dirichlet boundary values for $u$. We do so on the domain $[-1,1]^2\backslash[-\frac 12,\frac 12]^2$, i.e., a square with a square hole in the middle.

        +

        The difference to step-14 is of course that we use $hp$-finite elements for the solution. The test case is of interest because it has re-entrant corners in the corners of the hole, at which the solution has singularities. We therefore expect that the solution will be smooth in the interior of the domain, and rough in the vicinity of the singularities. The hope is that our refinement and smoothness indicators will be able to see this behavior and refine the mesh close to the singularities, while the polynomial degree is increased away from it. As we will see in the results section, this is indeed the case.

        The commented program

        Include files

        The first few files have already been covered in previous examples and will thus not be further commented on.

        @@ -492,7 +492,7 @@
          #include <deal.II/numerics/error_estimator.h>
         
        const ::Triangulation< dim, spacedim > & tria
        /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_28.html differs (JavaScript source, ASCII text, with very long lines (2070)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_28.html 2023-11-25 15:26:01.686520207 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_28.html 2023-11-25 15:26:01.686520207 +0100 @@ -172,8 +172,8 @@

        Introduction

        In this example, we intend to solve the multigroup diffusion approximation of the neutron transport equation. Essentially, the way to view this is as follows: In a nuclear reactor, neutrons are speeding around at different energies, get absorbed or scattered, or start a new fission event. If viewed at long enough length scales, the movement of neutrons can be considered a diffusion process.

        -

        A mathematical description of this would group neutrons into energy bins, and consider the balance equations for the neutron fluxes in each of these bins, or energy groups. The scattering, absorption, and fission events would then be operators within the diffusion equation describing the neutron fluxes. Assume we have energy groups $g=1,\ldots,G$, where by convention we assume that the neutrons with the highest energy are in group 1 and those with the lowest energy in group $G$. Then the neutron flux of each group satisfies the following equations:

        -\begin{eqnarray*}
+<p>A mathematical description of this would group neutrons into energy bins, and consider the balance equations for the neutron fluxes in each of these bins, or energy groups. The scattering, absorption, and fission events would then be operators within the diffusion equation describing the neutron fluxes. Assume we have energy groups <picture><source srcset=$g=1,\ldots,G$, where by convention we assume that the neutrons with the highest energy are in group 1 and those with the lowest energy in group $G$. Then the neutron flux of each group satisfies the following equations:

        +\begin{eqnarray*}
 \frac 1{v_g}\frac{\partial \phi_g(x,t)}{\partial t}
 &=&
 \nabla \cdot(D_g(x) \nabla \phi_g(x,t))
@@ -187,25 +187,25 @@
 \sum_{g'\ne g}\Sigma_{s,g'\to g}(x)\phi_{g'}(x,t)
 +
 s_{\mathrm{ext},g}(x,t)
-\end{eqnarray*} +\end{eqnarray*}" src="form_3729.png"/>

        -

        augmented by appropriate boundary conditions. Here, $v_g$ is the velocity of neutrons within group $g$. In other words, the change in time in flux of neutrons in group $g$ is governed by the following processes:

          +

          augmented by appropriate boundary conditions. Here, $v_g$ is the velocity of neutrons within group $g$. In other words, the change in time in flux of neutrons in group $g$ is governed by the following processes:

          • -Diffusion $\nabla \cdot(D_g(x) \nabla \phi_g(x,t))$. Here, $D_g$ is the (spatially variable) diffusion coefficient.
          • +Diffusion $\nabla \cdot(D_g(x) \nabla \phi_g(x,t))$. Here, $D_g$ is the (spatially variable) diffusion coefficient.
          • -Absorption $\Sigma_{r,g}(x)\phi_g(x,t)$ (note the negative sign). The coefficient $\Sigma_{r,g}$ is called the removal cross section.
          • +Absorption $\Sigma_{r,g}(x)\phi_g(x,t)$ (note the negative sign). The coefficient $\Sigma_{r,g}$ is called the removal cross section.
          • -Nuclear fission $\chi_g\sum_{g'=1}^G\nu\Sigma_{f,g'}(x)\phi_{g'}(x,t)$. The production of neutrons of energy $g$ is proportional to the flux of neutrons of energy $g'$ times the probability $\Sigma_{f,g'}$ that neutrons of energy $g'$ cause a fission event times the number $\nu$ of neutrons produced in each fission event times the probability that a neutron produced in this event has energy $g$. $\nu\Sigma_{f,g'}$ is called the fission cross section and $\chi_g$ the fission spectrum. We will denote the term $\chi_g\nu\Sigma_{f,g'}$ as the fission distribution cross section in the program.
          • +Nuclear fission $\chi_g\sum_{g'=1}^G\nu\Sigma_{f,g'}(x)\phi_{g'}(x,t)$. The production of neutrons of energy $g$ is proportional to the flux of neutrons of energy $g'$ times the probability $\Sigma_{f,g'}$ that neutrons of energy $g'$ cause a fission event times the number $\nu$ of neutrons produced in each fission event times the probability that a neutron produced in this event has energy $g$. $\nu\Sigma_{f,g'}$ is called the fission cross section and $\chi_g$ the fission spectrum. We will denote the term $\chi_g\nu\Sigma_{f,g'}$ as the fission distribution cross section in the program.
          • -Scattering $\sum_{g'\ne g}\Sigma_{s,g'\to g}(x)\phi_{g'}(x,t)$ of neutrons of energy $g'$ producing neutrons of energy $g$. $\Sigma_{s,g'\to g}$ is called the scattering cross section. The case of elastic, in-group scattering $g'=g$ exists, too, but we subsume this into the removal cross section. The case $g'<g$ is called down-scattering, since a neutron loses energy in such an event. On the other hand, $g'>g$ corresponds to up-scattering: a neutron gains energy in a scattering event from the thermal motion of the atoms surrounding it; up-scattering is therefore only an important process for neutrons with kinetic energies that are already on the same order as the thermal kinetic energy (i.e. in the sub $eV$ range).
          • +Scattering $\sum_{g'\ne g}\Sigma_{s,g'\to g}(x)\phi_{g'}(x,t)$ of neutrons of energy $g'$ producing neutrons of energy $g$. $\Sigma_{s,g'\to g}$ is called the scattering cross section. The case of elastic, in-group scattering $g'=g$ exists, too, but we subsume this into the removal cross section. The case $g'<g$ is called down-scattering, since a neutron loses energy in such an event. On the other hand, $g'>g$ corresponds to up-scattering: a neutron gains energy in a scattering event from the thermal motion of the atoms surrounding it; up-scattering is therefore only an important process for neutrons with kinetic energies that are already on the same order as the thermal kinetic energy (i.e. in the sub $eV$ range).
          • -An extraneous source $s_{\mathrm{ext},g}$.
          • +An extraneous source $s_{\mathrm{ext},g}$.

          For realistic simulations in reactor analysis, one may want to split the continuous spectrum of neutron energies into many energy groups, often up to 100. However, if neutron energy spectra are known well enough for some type of reactor (for example Pressurized Water Reactors, PWR), it is possible to obtain satisfactory results with only 2 energy groups.

          -

          In the program shown in this tutorial program, we provide the structure to compute with as many energy groups as desired. However, to keep computing times moderate and in order to avoid tabulating hundreds of coefficients, we only provide the coefficients for above equations for a two-group simulation, i.e. $g=1,2$. We do, however, consider a realistic situation by assuming that the coefficients are not constant, but rather depend on the materials that are assembled into reactor fuel assemblies in rather complicated ways (see below).

          +

          In the program shown in this tutorial program, we provide the structure to compute with as many energy groups as desired. However, to keep computing times moderate and in order to avoid tabulating hundreds of coefficients, we only provide the coefficients for above equations for a two-group simulation, i.e. $g=1,2$. We do, however, consider a realistic situation by assuming that the coefficients are not constant, but rather depend on the materials that are assembled into reactor fuel assemblies in rather complicated ways (see below).

          The eigenvalue problem

          If we consider all energy groups at once, we may write above equations in the following operator form:

          -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \frac 1v \frac{\partial \phi}{\partial t}
 =
 -L\phi
@@ -215,64 +215,64 @@
 X\phi
 +
 s_{\mathrm{ext}},
-\end{eqnarray*} +\end{eqnarray*}" src="form_3749.png"/>

          -

          where $L,F,X$ are sinking, fission, and scattering operators, respectively. $L$ here includes both the diffusion and removal terms. Note that $L$ is symmetric, whereas $F$ and $X$ are not.

          -

          It is well known that this equation admits a stable solution if all eigenvalues of the operator $-L+F+X$ are negative. This can be readily seen by multiplying the equation by $\phi$ and integrating over the domain, leading to

          -\begin{eqnarray*}
+<p> where <picture><source srcset=$L,F,X$ are sinking, fission, and scattering operators, respectively. $L$ here includes both the diffusion and removal terms. Note that $L$ is symmetric, whereas $F$ and $X$ are not.

          +

          It is well known that this equation admits a stable solution if all eigenvalues of the operator $-L+F+X$ are negative. This can be readily seen by multiplying the equation by $\phi$ and integrating over the domain, leading to

          +\begin{eqnarray*}
   \frac 1{2v} \frac{\partial}{\partial t}  \|\phi\|^2 = ((-L+F+X)\phi,\phi).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3752.png"/>

          Stability means that the solution does not grow, i.e. we want the left hand side to be less than zero, which is the case if the eigenvalues of the operator on the right are all negative. For obvious reasons, it is not very desirable if a nuclear reactor produces neutron fluxes that grow exponentially, so eigenvalue analyses are the bread-and-butter of nuclear engineers. The main point of the program is therefore to consider the eigenvalue problem

          -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (L-F-X) \phi = \lambda \phi,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3753.png"/>

          -

          where we want to make sure that all eigenvalues are positive. Note that $L$, being the diffusion operator plus the absorption (removal), is positive definite; the condition that all eigenvalues are positive therefore means that we want to make sure that fission and inter-group scattering are weak enough to not shift the spectrum into the negative.

          -

          In nuclear engineering, one typically looks at a slightly different formulation of the eigenvalue problem. To this end, we do not just multiply with $\phi$ and integrate, but rather multiply with $\phi(L-X)^{-1}$. We then get the following evolution equation:

          -\begin{eqnarray*}
+<p> where we want to make sure that all eigenvalues are positive. Note that <picture><source srcset=$L$, being the diffusion operator plus the absorption (removal), is positive definite; the condition that all eigenvalues are positive therefore means that we want to make sure that fission and inter-group scattering are weak enough to not shift the spectrum into the negative.

          +

          In nuclear engineering, one typically looks at a slightly different formulation of the eigenvalue problem. To this end, we do not just multiply with $\phi$ and integrate, but rather multiply with $\phi(L-X)^{-1}$. We then get the following evolution equation:

          +\begin{eqnarray*}
   \frac 1{2v} \frac{\partial}{\partial t}  \|\phi\|^2_{(L-X)^{-1}} = ((L-X)^{-1}(-L+F+X)\phi,\phi).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3755.png"/>

          Stability is then guaranteed if the eigenvalues of the following problem are all negative:

          -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (L-X)^{-1}(-L+F+X)\phi = \lambda_F \phi,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3756.png"/>

          which is equivalent to the eigenvalue problem

          -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (L-X)\phi = \frac 1{\lambda_F+1} F \phi.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3757.png"/>

          The typical formulation in nuclear engineering is to write this as

          -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (L-X) \phi = \frac 1{k_{\mathrm{eff}}} F \phi,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3758.png"/>

          -

          where $k_{\mathrm{eff}}=\frac 1{\lambda^F+1}$. Intuitively, $k_{\mathrm{eff}}$ is something like the multiplication factor for neutrons per typical time scale and should be less than or equal to one for stable operation of a reactor: if it is less than one, the chain reaction will die down, whereas nuclear bombs for example have a $k$-eigenvalue larger than one. A stable reactor should have $k_{\mathrm{eff}}=1$.

          -

          For those who wonder how this can be achieved in practice without inadvertently getting slightly larger than one and triggering a nuclear bomb: first, fission processes happen on different time scales. While most neutrons are released very quickly after a fission event, a small number of neutrons are only released by daughter nuclei after several further decays, up to 10-60 seconds after the fission was initiated. If one is therefore slightly beyond $k_{\mathrm{eff}}=1$, one therefore has many seconds to react until all the neutrons created in fission re-enter the fission cycle. Nevertheless, control rods in nuclear reactors absorbing neutrons – and therefore reducing $k_{\mathrm{eff}}$ – are designed in such a way that they are all the way in the reactor in at most 2 seconds.

          -

          One therefore has on the order of 10-60 seconds to regulate the nuclear reaction if $k_{\mathrm{eff}}$ should be larger than one for some time, as indicated by a growing neutron flux. Regulation can be achieved by continuously monitoring the neutron flux, and if necessary increase or reduce neutron flux by moving neutron-absorbing control rods a few millimeters into or out of the reactor. On a longer scale, the water cooling the reactor contains boron, a good neutron absorber. Every few hours, boron concentrations are adjusted by adding boron or diluting the coolant.

          +

          where $k_{\mathrm{eff}}=\frac 1{\lambda^F+1}$. Intuitively, $k_{\mathrm{eff}}$ is something like the multiplication factor for neutrons per typical time scale and should be less than or equal to one for stable operation of a reactor: if it is less than one, the chain reaction will die down, whereas nuclear bombs for example have a $k$-eigenvalue larger than one. A stable reactor should have $k_{\mathrm{eff}}=1$.

          +

          For those who wonder how this can be achieved in practice without inadvertently getting slightly larger than one and triggering a nuclear bomb: first, fission processes happen on different time scales. While most neutrons are released very quickly after a fission event, a small number of neutrons are only released by daughter nuclei after several further decays, up to 10-60 seconds after the fission was initiated. If one is therefore slightly beyond $k_{\mathrm{eff}}=1$, one therefore has many seconds to react until all the neutrons created in fission re-enter the fission cycle. Nevertheless, control rods in nuclear reactors absorbing neutrons – and therefore reducing $k_{\mathrm{eff}}$ – are designed in such a way that they are all the way in the reactor in at most 2 seconds.

          +

          One therefore has on the order of 10-60 seconds to regulate the nuclear reaction if $k_{\mathrm{eff}}$ should be larger than one for some time, as indicated by a growing neutron flux. Regulation can be achieved by continuously monitoring the neutron flux, and if necessary increase or reduce neutron flux by moving neutron-absorbing control rods a few millimeters into or out of the reactor. On a longer scale, the water cooling the reactor contains boron, a good neutron absorber. Every few hours, boron concentrations are adjusted by adding boron or diluting the coolant.

          Finally, some of the absorption and scattering reactions have some stability built in; for example, higher neutron fluxes result in locally higher temperatures, which lowers the density of water and therefore reduces the number of scatterers that are necessary to moderate neutrons from high to low energies before they can start fission events themselves.

          -

          In this tutorial program, we solve above $k$-eigenvalue problem for two energy groups, and we are looking for the largest multiplication factor $k_{\mathrm{eff}}$, which is proportional to the inverse of the minimum eigenvalue plus one. To solve the eigenvalue problem, we generally use a modified version of the inverse power method. The algorithm looks like this:

          +

          In this tutorial program, we solve above $k$-eigenvalue problem for two energy groups, and we are looking for the largest multiplication factor $k_{\mathrm{eff}}$, which is proportional to the inverse of the minimum eigenvalue plus one. To solve the eigenvalue problem, we generally use a modified version of the inverse power method. The algorithm looks like this:

          1. -

            Initialize $\phi_g$ and $k_{\mathrm{eff}}$ with $\phi_g^{(0)}$ and $k_{\mathrm{eff}}^{(0)}$ and let $n=1$.

            +

            Initialize $\phi_g$ and $k_{\mathrm{eff}}$ with $\phi_g^{(0)}$ and $k_{\mathrm{eff}}^{(0)}$ and let $n=1$.

          2. Define the so-called fission source by

            -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
     s_f^{(n-1)}(x)
     =
     \frac{1}{k_{\mathrm{eff}}^{(n-1)}}
     \sum_{g'=1}^G\nu\Sigma_{f,g'}(x)\phi_{g'}^{(n-1)}(x).
-  \end{eqnarray*} + \end{eqnarray*}" src="form_3766.png"/>

          3. -

            Solve for all group fluxes $\phi_g,g=1,\ldots,G$ using

            -\begin{eqnarray*}
+<p class=Solve for all group fluxes $\phi_g,g=1,\ldots,G$ using

            +\begin{eqnarray*}
     -\nabla \cdot D_g\nabla \phi_g^{(n)}
     +
     \Sigma_{r,g}\phi_g^{(n)}
     =
     \chi_g s_f^{(n-1)}
     +
     \sum_{g'< g} \Sigma_{s,g'\to g} \phi_{g'}^{(n)}
     +
     \sum_{g'> g}\Sigma_{s,g'\to g}\phi_{g'}^{(n-1)}.
  \end{eqnarray*}

          4. Update

            -\begin{eqnarray*}
\begin{eqnarray*}
     k_{\mathrm{eff}}^{(n)}
     =
     \sum_{g'=1}^G
     \int_{\Omega}\nu\Sigma_{f,g'}(x)
     \phi_{g'}^{(n)}(x)dx.
  \end{eqnarray*}

          5. -Compare $k_{\mathrm{eff}}^{(n)}$ with $k_{\mathrm{eff}}^{(n-1)}$. If the change is greater than a prescribed tolerance then set $n=n+1$ and repeat the iteration starting at step 2, otherwise end the iteration.
          6. +Compare $k_{\mathrm{eff}}^{(n)}$ with $k_{\mathrm{eff}}^{(n-1)}$. If the change is greater than a prescribed tolerance then set $n=n+1$ and repeat the iteration starting at step 2, otherwise end the iteration.
          -

          Note that in this scheme, we do not solve group fluxes exactly in each power iteration, but rather consider the previously computed $\phi_{g'}^{(n)}$ only for down-scattering events $g'<g$. Up-scattering is only treated by using old iterates $\phi_{g'}^{(n-1)}$, in essence assuming that the scattering operator is triangular. This is physically motivated since up-scattering does not play too important a role in neutron scattering. In addition, practice shows that the inverse power iteration is stable even when using this simplification.

          +

          Note that in this scheme, we do not solve group fluxes exactly in each power iteration, but rather consider the previously computed $\phi_{g'}^{(n)}$ only for down-scattering events $g'<g$. Up-scattering is only treated by using old iterates $\phi_{g'}^{(n-1)}$, in essence assuming that the scattering operator is triangular. This is physically motivated since up-scattering does not play too important a role in neutron scattering. In addition, practice shows that the inverse power iteration is stable even when using this simplification.

          Note also that one can use lots of extrapolation techniques to accelerate the power iteration laid out above. However, none of these are implemented in this example.

          Meshes and mesh refinement

          -

          One may wonder whether it is appropriate to solve for the solutions of the individual energy group equations on the same meshes. The question boils down to this: will $\phi_g$ and $\phi_{g'}$ have similar smoothness properties? If this is the case, then it is appropriate to use the same mesh for the two; a typical application could be chemical combustion, where typically the concentrations of all or most chemical species change rapidly within the flame front. As it turns out, and as will be apparent by looking at the graphs shown in the results section of this tutorial program, this isn't the case here, however: since the diffusion coefficient is different for different energy groups, fast neutrons (in bins with a small group number $g$) have a very smooth flux function, whereas slow neutrons (in bins with a large group number) are much more affected by the local material properties and have a correspondingly rough solution if the coefficient are rough as in the case we compute here. Consequently, we will want to use different meshes to compute each energy group.

          -

          This has two implications that we will have to consider: First, we need to find a way to refine the meshes individually. Second, assembling the source terms for the inverse power iteration, where we have to integrate solution $\phi_{g'}^{(n)}$ defined on mesh $g'$ against the shape functions defined on mesh $g$, becomes a much more complicated task.

          +

          One may wonder whether it is appropriate to solve for the solutions of the individual energy group equations on the same meshes. The question boils down to this: will $\phi_g$ and $\phi_{g'}$ have similar smoothness properties? If this is the case, then it is appropriate to use the same mesh for the two; a typical application could be chemical combustion, where typically the concentrations of all or most chemical species change rapidly within the flame front. As it turns out, and as will be apparent by looking at the graphs shown in the results section of this tutorial program, this isn't the case here, however: since the diffusion coefficient is different for different energy groups, fast neutrons (in bins with a small group number $g$) have a very smooth flux function, whereas slow neutrons (in bins with a large group number) are much more affected by the local material properties and have a correspondingly rough solution if the coefficient are rough as in the case we compute here. Consequently, we will want to use different meshes to compute each energy group.

          +

          This has two implications that we will have to consider: First, we need to find a way to refine the meshes individually. Second, assembling the source terms for the inverse power iteration, where we have to integrate solution $\phi_{g'}^{(n)}$ defined on mesh $g'$ against the shape functions defined on mesh $g$, becomes a much more complicated task.

          Mesh refinement

          We use the usual paradigm: solve on a given mesh, then evaluate an error indicator for each cell of each mesh we have. Because it is so convenient, we again use the a posteriori error estimator by Kelly, Gago, Zienkiewicz and Babuska which approximates the error per cell by integrating the jump of the gradient of the solution along the faces of each cell. Using this, we obtain indicators

          -\begin{eqnarray*}
\begin{eqnarray*}
 \eta_{g,K}, \qquad g=1,2,\ldots,G,\qquad K\in{\cal T}_g,
\end{eqnarray*}

          -

          where ${\cal T}_g$ is the triangulation used in the solution of $\phi_g$. The question is what to do with this. For one, it is clear that refining only those cells with the highest error indicators might lead to bad results. To understand this, it is important to realize that $\eta_{g,K}$ scales with the second derivative of $\phi_g$. In other words, if we have two energy groups $g=1,2$ whose solutions are equally smooth but where one is larger by a factor of 10,000, for example, then only the cells of that mesh will be refined, whereas the mesh for the solution of small magnitude will remain coarse. This is probably not what one wants, since we can consider both components of the solution equally important.

          -

          In essence, we would therefore have to scale $\eta_{g,K}$ by an importance factor $z_g$ that says how important it is to resolve $\phi_g$ to any given accuracy. Such important factors can be computed using duality techniques (see, for example, the step-14 tutorial program, and the reference to the book by Bangerth and Rannacher cited there). We won't go there, however, and simply assume that all energy groups are equally important, and will therefore normalize the error indicators $\eta_{g,K}$ for group $g$ by the maximum of the solution $\phi_g$. We then refine the cells whose errors satisfy

          -\begin{eqnarray*}
          where ${\cal T}_g$ is the triangulation used in the solution of $\phi_g$. The question is what to do with this. For one, it is clear that refining only those cells with the highest error indicators might lead to bad results. To understand this, it is important to realize that $\eta_{g,K}$ scales with the second derivative of $\phi_g$. In other words, if we have two energy groups $g=1,2$ whose solutions are equally smooth but where one is larger by a factor of 10,000, for example, then only the cells of that mesh will be refined, whereas the mesh for the solution of small magnitude will remain coarse. This is probably not what one wants, since we can consider both components of the solution equally important.

          +

          In essence, we would therefore have to scale $\eta_{g,K}$ by an importance factor $z_g$ that says how important it is to resolve $\phi_g$ to any given accuracy. Such important factors can be computed using duality techniques (see, for example, the step-14 tutorial program, and the reference to the book by Bangerth and Rannacher cited there). We won't go there, however, and simply assume that all energy groups are equally important, and will therefore normalize the error indicators $\eta_{g,K}$ for group $g$ by the maximum of the solution $\phi_g$. We then refine the cells whose errors satisfy

          +\begin{eqnarray*}
   \frac{\eta_{g,K}}{\|\phi_g\|_\infty}
   >
   \alpha_1
   \displaystyle{\max_{\begin{matrix}1\le g\le G \\ K\in {\cal T}_g\end{matrix}}
     \frac{\eta_{g,K}}{\|\phi_g\|_\infty}}
\end{eqnarray*}

          and coarsen the cells where

          -\begin{eqnarray*}
\begin{eqnarray*}
   \frac{\eta_{g,K}}{\|\phi_g\|_\infty}
/usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_29.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (2264))
--- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_29.html	2023-11-25 15:26:01.713186332 +0100
+++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_29.html	2023-11-25 15:26:01.713186332 +0100
@@ -137,43 +137,43 @@
 <p><a class=

          Problem setting

          The original purpose of this program is to simulate the focusing properties of an ultrasound wave generated by a transducer lens with variable geometry. Recent applications in medical imaging use ultrasound waves not only for imaging purposes, but also to excite certain local effects in a material, like changes in optical properties, that can then be measured by other imaging techniques. A vital ingredient for these methods is the ability to focus the intensity of the ultrasound wave in a particular part of the material, ideally in a point, to be able to examine the properties of the material at that particular location.

          To derive a model for this problem, we think of ultrasound as a pressure wave governed by the wave equation:

          -\[
\[
         \frac{\partial^2 U}{\partial t^2}       -       c^2 \Delta U = 0
\]

          -

          where $c$ is the wave speed (that for simplicity we assume to be constant), $U
-= U(x,t),\;x \in \Omega,\;t\in\mathrm{R}$. The boundary $\Gamma=\partial\Omega$ is divided into two parts $\Gamma_1$ and $\Gamma_2=\Gamma\setminus\Gamma_1$, with $\Gamma_1$ representing the transducer lens and $\Gamma_2$ an absorbing boundary (that is, we want to choose boundary conditions on $\Gamma_2$ in such a way that they imitate a larger domain). On $\Gamma_1$, the transducer generates a wave of constant frequency ${\omega}>0$ and constant amplitude (that we chose to be 1 here):

          -\[
          where $c$ is the wave speed (that for simplicity we assume to be constant), $U = U(x,t),\;x \in \Omega,\;t\in\mathrm{R}$. The boundary $\Gamma=\partial\Omega$ is divided into two parts $\Gamma_1$ and $\Gamma_2=\Gamma\setminus\Gamma_1$, with $\Gamma_1$ representing the transducer lens and $\Gamma_2$ an absorbing boundary (that is, we want to choose boundary conditions on $\Gamma_2$ in such a way that they imitate a larger domain). On $\Gamma_1$, the transducer generates a wave of constant frequency ${\omega}>0$ and constant amplitude (that we chose to be 1 here):

          +\[
 U(x,t) = \cos{\omega t}, \qquad x\in \Gamma_1
\]

          -

          If there are no other (interior or boundary) sources, and since the only source has frequency $\omega$, then the solution admits a separation of variables of the form $U(x,t) = \textrm{Re}\left(u(x)\,e^{i\omega
-t})\right)$. The complex-valued function $u(x)$ describes the spatial dependency of amplitude and phase (relative to the source) of the waves of frequency ${\omega}$, with the amplitude being the quantity that we are interested in. By plugging this form of the solution into the wave equation, we see that for $u$ we have

          -\begin{eqnarray*}
          If there are no other (interior or boundary) sources, and since the only source has frequency $\omega$, then the solution admits a separation of variables of the form $U(x,t) = \textrm{Re}\left(u(x)\,e^{i\omega t}\right)$. The complex-valued function $u(x)$ describes the spatial dependency of amplitude and phase (relative to the source) of the waves of frequency ${\omega}$, with the amplitude being the quantity that we are interested in. By plugging this form of the solution into the wave equation, we see that for $u$ we have

          +\begin{eqnarray*}
 -\omega^2 u(x) - c^2\Delta u(x) &=& 0, \qquad x\in\Omega,\\
 u(x) &=& 1,  \qquad x\in\Gamma_1.
\end{eqnarray*}

          -

          For finding suitable conditions on $\Gamma_2$ that model an absorbing boundary, consider a wave of the form $V(x,t)=e^{i(k\cdot x -\omega t)}$ with frequency ${\omega}$ traveling in direction $k\in {\mathrm{R}^2}$. In order for $V$ to solve the wave equation, $|k|={\frac{\omega}{c}}$ must hold. Suppose that this wave hits the boundary in $x_0\in\Gamma_2$ at a right angle, i.e. $n=\frac{k}{|k|}$ with $n$ denoting the outer unit normal of $\Omega$ in $x_0$. Then at $x_0$, this wave satisfies the equation

          -\[
          For finding suitable conditions on $\Gamma_2$ that model an absorbing boundary, consider a wave of the form $V(x,t)=e^{i(k\cdot x -\omega t)}$ with frequency ${\omega}$ traveling in direction $k\in {\mathrm{R}^2}$. In order for $V$ to solve the wave equation, $|k|={\frac{\omega}{c}}$ must hold. Suppose that this wave hits the boundary in $x_0\in\Gamma_2$ at a right angle, i.e. $n=\frac{k}{|k|}$ with $n$ denoting the outer unit normal of $\Omega$ in $x_0$. Then at $x_0$, this wave satisfies the equation

          +\[
 c (n\cdot\nabla V) + \frac{\partial V}{\partial t} = (i\, c\, |k| - i\, \omega) V = 0.
\]

          Hence, by enforcing the boundary condition

          -\[
\[
 c (n\cdot\nabla U) + \frac{\partial U}{\partial t} = 0, \qquad x\in\Gamma_2,
\]

          -

          waves that hit the boundary $\Gamma_2$ at a right angle will be perfectly absorbed. On the other hand, those parts of the wave field that do not hit a boundary at a right angle do not satisfy this condition and enforcing it as a boundary condition will yield partial reflections, i.e. only parts of the wave will pass through the boundary as if it wasn't here whereas the remaining fraction of the wave will be reflected back into the domain.

          -

          If we are willing to accept this as a sufficient approximation to an absorbing boundary we finally arrive at the following problem for $u$:

          -\begin{eqnarray*}
          waves that hit the boundary $\Gamma_2$ at a right angle will be perfectly absorbed. On the other hand, those parts of the wave field that do not hit a boundary at a right angle do not satisfy this condition and enforcing it as a boundary condition will yield partial reflections, i.e. only parts of the wave will pass through the boundary as if it wasn't here whereas the remaining fraction of the wave will be reflected back into the domain.

          +

          If we are willing to accept this as a sufficient approximation to an absorbing boundary we finally arrive at the following problem for $u$:

          +\begin{eqnarray*}
 -\omega^2 u - c^2\Delta u &=& 0, \qquad x\in\Omega,\\
 c (n\cdot\nabla u) + i\,\omega\,u &=&0, \qquad x\in\Gamma_2,\\
 u &=& 1,  \qquad x\in\Gamma_1.
\end{eqnarray*}

          -

          This is a Helmholtz equation (similar to the one in step-7, but this time with ''the bad sign'') with Dirichlet data on $\Gamma_1$ and mixed boundary conditions on $\Gamma_2$. Because of the condition on $\Gamma_2$, we cannot just treat the equations for real and imaginary parts of $u$ separately. What we can do however is to view the PDE for $u$ as a system of two PDEs for the real and imaginary parts of $u$, with the boundary condition on $\Gamma_2$ representing the coupling terms between the two components of the system. This works along the following lines: Let $v=\textrm{Re}\;u,\; w=\textrm{Im}\;u$, then in terms of $v$ and $w$ we have the following system:

          -\begin{eqnarray*}
          This is a Helmholtz equation (similar to the one in step-7, but this time with "the bad sign") with Dirichlet data on $\Gamma_1$ and mixed boundary conditions on $\Gamma_2$. Because of the condition on $\Gamma_2$, we cannot just treat the equations for real and imaginary parts of $u$ separately. What we can do however is to view the PDE for $u$ as a system of two PDEs for the real and imaginary parts of $u$, with the boundary condition on $\Gamma_2$ representing the coupling terms between the two components of the system. This works along the following lines: Let $v=\textrm{Re}\;u,\; w=\textrm{Im}\;u$, then in terms of $v$ and $w$ we have the following system:

          +\begin{eqnarray*}
   \left.\begin{array}{ccc}
     -\omega^2 v - c^2\Delta v &=& 0 \quad\\
     -\omega^2 w - c^2\Delta w &=& 0 \quad
@@ -188,26 +188,26 @@
     v &=& 1 \quad\\
     w &=& 0 \quad
   \end{array}\right\} &\;& x\in\Gamma_1.
\end{eqnarray*}

          -

          For test functions $\phi,\psi$ with $\phi|_{\Gamma_1}=\psi|_{\Gamma_1}=0$, after the usual multiplication, integration over $\Omega$ and applying integration by parts, we get the weak formulation

          -\begin{eqnarray*}
          For test functions $\phi,\psi$ with $\phi|_{\Gamma_1}=\psi|_{\Gamma_1}=0$, after the usual multiplication, integration over $\Omega$ and applying integration by parts, we get the weak formulation

          +\begin{eqnarray*}
 -\omega^2 \langle \phi, v \rangle_{\mathrm{L}^2(\Omega)}
 + c^2 \langle \nabla \phi, \nabla v \rangle_{\mathrm{L}^2(\Omega)}
 - c \omega \langle \phi, w \rangle_{\mathrm{L}^2(\Gamma_2)} &=& 0, \\
 -\omega^2 \langle \psi, w \rangle_{\mathrm{L}^2(\Omega)}
 + c^2 \langle \nabla \psi, \nabla w \rangle_{\mathrm{L}^2(\Omega)}
 + c \omega \langle \psi, v \rangle_{\mathrm{L}^2(\Gamma_2)} &=& 0.
\end{eqnarray*}

          -

          We choose finite element spaces $V_h$ and $W_h$ with bases $\{\phi_j\}_{j=1}^n,
-\{\psi_j\}_{j=1}^n$ and look for approximate solutions

          -\[
          We choose finite element spaces $V_h$ and $W_h$ with bases $\{\phi_j\}_{j=1}^n, \{\psi_j\}_{j=1}^n$ and look for approximate solutions

          +\[
 v_h = \sum_{j=1}^n \alpha_j \phi_j, \;\; w_h = \sum_{j=1}^n \beta_j \psi_j.
\]

          Plugging into the variational form yields the equation system

          -\[
\[
 \renewcommand{\arraystretch}{2.0}
 \left.\begin{array}{ccc}
 \sum_{j=1}^n
@@ -230,10 +230,10 @@
 \right)\alpha_j
 &=& 0
 \end{array}\right\}\;\;\forall\; i =1,\ldots,n.
\]

          In matrix notation:

          -\[
\[
 \renewcommand{\arraystretch}{2.0}
 \left(
 \begin{array}{cc}
@@ -256,12 +256,12 @@
 0 \\ 0
 \end{array}
 \right)
\]

          -

          (One should not be fooled by the right hand side being zero here, that is because we haven't included the Dirichlet boundary data yet.) Because of the alternating sign in the off-diagonal blocks, we can already see that this system is non-symmetric, in fact it is even indefinite. Of course, there is no necessity to choose the spaces $V_h$ and $W_h$ to be the same. However, we expect real and imaginary part of the solution to have similar properties and will therefore indeed take $V_h=W_h$ in the implementation, and also use the same basis functions $\phi_i = \psi_i$ for both spaces. The reason for the notation using different symbols is just that it allows us to distinguish between shape functions for $v$ and $w$, as this distinction plays an important role in the implementation.

          +

          (One should not be fooled by the right hand side being zero here, that is because we haven't included the Dirichlet boundary data yet.) Because of the alternating sign in the off-diagonal blocks, we can already see that this system is non-symmetric, in fact it is even indefinite. Of course, there is no necessity to choose the spaces $V_h$ and $W_h$ to be the same. However, we expect real and imaginary part of the solution to have similar properties and will therefore indeed take $V_h=W_h$ in the implementation, and also use the same basis functions $\phi_i = \psi_i$ for both spaces. The reason for the notation using different symbols is just that it allows us to distinguish between shape functions for $v$ and $w$, as this distinction plays an important role in the implementation.

          The test case

          -

          For the computations, we will consider wave propagation in the unit square, with ultrasound generated by a transducer lens that is shaped like a segment of the circle with center at $(0.5, d)$ and a radius slightly greater than $d$; this shape should lead to a focusing of the sound wave at the center of the circle. Varying $d$ changes the "focus" of the lens and affects the spatial distribution of the intensity of $u$, where our main concern is how well $|u|=\sqrt{v^2+w^2}$ is focused.

          -

          In the program below, we will implement the complex-valued Helmholtz equations using the formulation with split real and imaginary parts. We will also discuss how to generate a domain that looks like a square with a slight bulge simulating the transducer (in the UltrasoundProblem<dim>::make_grid() function), and how to generate graphical output that not only contains the solution components $v$ and $w$, but also the magnitude $\sqrt{v^2+w^2}$ directly in the output file (in UltrasoundProblem<dim>::output_results()). Finally, we use the ParameterHandler class to easily read parameters like the focal distance $d$, wave speed $c$, frequency $\omega$, and a number of other parameters from an input file at run-time, rather than fixing those parameters in the source code where we would have to re-compile every time we want to change parameters.

          +

          For the computations, we will consider wave propagation in the unit square, with ultrasound generated by a transducer lens that is shaped like a segment of the circle with center at $(0.5, d)$ and a radius slightly greater than $d$; this shape should lead to a focusing of the sound wave at the center of the circle. Varying $d$ changes the "focus" of the lens and affects the spatial distribution of the intensity of $u$, where our main concern is how well $|u|=\sqrt{v^2+w^2}$ is focused.

          +

          In the program below, we will implement the complex-valued Helmholtz equations using the formulation with split real and imaginary parts. We will also discuss how to generate a domain that looks like a square with a slight bulge simulating the transducer (in the UltrasoundProblem<dim>::make_grid() function), and how to generate graphical output that not only contains the solution components $v$ and $w$, but also the magnitude $\sqrt{v^2+w^2}$ directly in the output file (in UltrasoundProblem<dim>::output_results()). Finally, we use the ParameterHandler class to easily read parameters like the focal distance $d$, wave speed $c$, frequency $\omega$, and a number of other parameters from an input file at run-time, rather than fixing those parameters in the source code where we would have to re-compile every time we want to change parameters.

          The commented program

          Include files

          The following header files have all been discussed before:

          @@ -308,7 +308,7 @@

      The DirichletBoundaryValues class

      First we define a class for the function representing the Dirichlet boundary values. This has been done many times before and therefore does not need much explanation.

      -

      Since there are two values $v$ and $w$ that need to be prescribed at the boundary, we have to tell the base class that this is a vector-valued function with two components, and the vector_value function and its cousin vector_value_list must return vectors with two entries. In our case the function is very simple, it just returns 1 for the real part $v$ and 0 for the imaginary part $w$ regardless of the point where it is evaluated.

      +

      Since there are two values $v$ and $w$ that need to be prescribed at the boundary, we have to tell the base class that this is a vector-valued function with two components, and the vector_value function and its cousin vector_value_list must return vectors with two entries. In our case the function is very simple, it just returns 1 for the real part $v$ and 0 for the imaginary part $w$ regardless of the point where it is evaluated.

        template <int dim>
        class DirichletBoundaryValues : public Function<dim>
        {
      @@ -367,7 +367,7 @@

      The declare_parameters function declares all the parameters that our ParameterHandler object will be able to read from input files, along with their types, range conditions and the subsections they appear in. We will wrap all the entries that go into a section in a pair of braces to force the editor to indent them by one level, making it simpler to read which entries together form a section:

        void ParameterReader::declare_parameters()
        {
      -

      Parameters for mesh and geometry include the number of global refinement steps that are applied to the initial coarse mesh and the focal distance $d$ of the transducer lens. For the number of refinement steps, we allow integer values in the range $[0,\infty)$, where the omitted second argument to the Patterns::Integer object denotes the half-open interval. For the focal distance any number greater than zero is accepted:

      +

      Parameters for mesh and geometry include the number of global refinement steps that are applied to the initial coarse mesh and the focal distance $d$ of the transducer lens. For the number of refinement steps, we allow integer values in the range $[0,\infty)$, where the omitted second argument to the Patterns::Integer object denotes the half-open interval. For the focal distance any number greater than zero is accepted:

        prm.enter_subsection("Mesh & geometry parameters");
        {
        prm.declare_entry("Number of refinements",
      @@ -386,7 +386,7 @@
       
      -

      The next subsection is devoted to the physical parameters appearing in the equation, which are the frequency $\omega$ and wave speed $c$. Again, both need to lie in the half-open interval $[0,\infty)$ represented by calling the Patterns::Double class with only the left end-point as argument:

      +

      The next subsection is devoted to the physical parameters appearing in the equation, which are the frequency $\omega$ and wave speed $c$. Again, both need to lie in the half-open interval $[0,\infty)$ represented by calling the Patterns::Double class with only the left end-point as argument:

        prm.enter_subsection("Physical constants");
        {
        prm.declare_entry("c", "1.5e5", Patterns::Double(0), "Wave speed");
      @@ -424,8 +424,8 @@
       
       

      The ComputeIntensity class

      -

      As mentioned in the introduction, the quantity that we are really after is the spatial distribution of the intensity of the ultrasound wave, which corresponds to $|u|=\sqrt{v^2+w^2}$. Now we could just be content with having $v$ and $w$ in our output, and use a suitable visualization or postprocessing tool to derive $|u|$ from the solution we computed. However, there is also a way to output data derived from the solution in deal.II, and we are going to make use of this mechanism here.

      -

      So far we have always used the DataOut::add_data_vector function to add vectors containing output data to a DataOut object. There is a special version of this function that in addition to the data vector has an additional argument of type DataPostprocessor. What happens when this function is used for output is that at each point where output data is to be generated, the DataPostprocessor::evaluate_scalar_field() or DataPostprocessor::evaluate_vector_field() function of the specified DataPostprocessor object is invoked to compute the output quantities from the values, the gradients and the second derivatives of the finite element function represented by the data vector (in the case of face related data, normal vectors are available as well). Hence, this allows us to output any quantity that can locally be derived from the values of the solution and its derivatives. Of course, the ultrasound intensity $|u|$ is such a quantity and its computation doesn't even involve any derivatives of $v$ or $w$.

      +

      As mentioned in the introduction, the quantity that we are really after is the spatial distribution of the intensity of the ultrasound wave, which corresponds to $|u|=\sqrt{v^2+w^2}$. Now we could just be content with having $v$ and $w$ in our output, and use a suitable visualization or postprocessing tool to derive $|u|$ from the solution we computed. However, there is also a way to output data derived from the solution in deal.II, and we are going to make use of this mechanism here.

      +

      So far we have always used the DataOut::add_data_vector function to add vectors containing output data to a DataOut object. There is a special version of this function that in addition to the data vector has an additional argument of type DataPostprocessor. What happens when this function is used for output is that at each point where output data is to be generated, the DataPostprocessor::evaluate_scalar_field() or DataPostprocessor::evaluate_vector_field() function of the specified DataPostprocessor object is invoked to compute the output quantities from the values, the gradients and the second derivatives of the finite element function represented by the data vector (in the case of face related data, normal vectors are available as well). Hence, this allows us to output any quantity that can locally be derived from the values of the solution and its derivatives. Of course, the ultrasound intensity $|u|$ is such a quantity and its computation doesn't even involve any derivatives of $v$ or $w$.

      In practice, the DataPostprocessor class only provides an interface to this functionality, and we need to derive our own class from it in order to implement the functions specified by the interface. In the most general case one has to implement several member functions but if the output quantity is a single scalar then some of this boilerplate code can be handled by a more specialized class, DataPostprocessorScalar and we can derive from that one instead. This is what the ComputeIntensity class does:

        template <int dim>
        class ComputeIntensity : public DataPostprocessorScalar<dim>
      @@ -441,8 +441,8 @@
      virtual void evaluate_vector_field(const DataPostprocessorInputs::Vector< dim > &input_data, std::vector< Vector< double > > &computed_quantities) const
      -

      In the constructor, we need to call the constructor of the base class with two arguments. The first denotes the name by which the single scalar quantity computed by this class should be represented in output files. In our case, the postprocessor has $|u|$ as output, so we use "Intensity".

      -

      The second argument is a set of flags that indicate which data is needed by the postprocessor in order to compute the output quantities. This can be any subset of update_values, update_gradients and update_hessians (and, in the case of face data, also update_normal_vectors), which are documented in UpdateFlags. Of course, computation of the derivatives requires additional resources, so only the flags for data that are really needed should be given here, just as we do when we use FEValues objects. In our case, only the function values of $v$ and $w$ are needed to compute $|u|$, so we're good with the update_values flag.

      +

      In the constructor, we need to call the constructor of the base class with two arguments. The first denotes the name by which the single scalar quantity computed by this class should be represented in output files. In our case, the postprocessor has $|u|$ as output, so we use "Intensity".

      +

      The second argument is a set of flags that indicate which data is needed by the postprocessor in order to compute the output quantities. This can be any subset of update_values, update_gradients and update_hessians (and, in the case of face data, also update_normal_vectors), which are documented in UpdateFlags. Of course, computation of the derivatives requires additional resources, so only the flags for data that are really needed should be given here, just as we do when we use FEValues objects. In our case, only the function values of $v$ and $w$ are needed to compute $|u|$, so we're good with the update_values flag.

        template <int dim>
        ComputeIntensity<dim>::ComputeIntensity()
        : DataPostprocessorScalar<dim>("Intensity", update_values)
      @@ -450,7 +450,7 @@
       
       
      @ update_values
      Shape function values.
      -

      The actual postprocessing happens in the following function. Its input is an object that stores values of the function (which is here vector-valued) representing the data vector given to DataOut::add_data_vector, evaluated at all evaluation points where we generate output, and some tensor objects representing derivatives (that we don't use here since $|u|$ is computed from just $v$ and $w$). The derived quantities are returned in the computed_quantities vector. Remember that this function may only use data for which the respective update flag is specified by get_needed_update_flags. For example, we may not use the derivatives here, since our implementation of get_needed_update_flags requests that only function values are provided.

      +

      The actual postprocessing happens in the following function. Its input is an object that stores values of the function (which is here vector-valued) representing the data vector given to DataOut::add_data_vector, evaluated at all evaluation points where we generate output, and some tensor objects representing derivatives (that we don't use here since $|u|$ is computed from just $v$ and $w$). The derived quantities are returned in the computed_quantities vector. Remember that this function may only use data for which the respective update flag is specified by get_needed_update_flags. For example, we may not use the derivatives here, since our implementation of get_needed_update_flags requests that only function values are provided.

        template <int dim>
        void ComputeIntensity<dim>::evaluate_vector_field(
      @@ -458,7 +458,7 @@
        {
        AssertDimension(computed_quantities.size(), inputs.solution_values.size());
       
      -

      The computation itself is straightforward: We iterate over each entry in the output vector and compute $|u|$ from the corresponding values of $v$ and $w$. We do this by creating a complex number $u$ and then calling std::abs() on the result. (One may be tempted to call std::norm(), but in a historical quirk, the C++ committee decided that std::norm() should return the square of the absolute value – thereby not satisfying the properties mathematicians require of something called a "norm".)

      +

      The computation itself is straightforward: We iterate over each entry in the output vector and compute $|u|$ from the corresponding values of $v$ and $w$. We do this by creating a complex number $u$ and then calling std::abs() on the result. (One may be tempted to call std::norm(), but in a historical quirk, the C++ committee decided that std::norm() should return the square of the absolute value – thereby not satisfying the properties mathematicians require of something called a "norm".)

        for (unsigned int p = 0; p < computed_quantities.size(); ++p)
        {
        AssertDimension(computed_quantities[p].size(), 1);
      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_3.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (2467)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_3.html 2023-11-25 15:26:01.739852456 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_3.html 2023-11-25 15:26:01.739852456 +0100 @@ -135,99 +135,99 @@
      Note
      The material presented here is also discussed in video lecture 10. (All video lectures are also available here.)

      The basic set up of finite element methods

      This is the first example where we actually use finite elements to compute something. We will solve a simple version of Poisson's equation with zero boundary values, but a nonzero right hand side:

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   -\Delta u &= f \qquad\qquad & \text{in}\ \Omega,
   \\
   u &= 0 \qquad\qquad & \text{on}\ \partial\Omega.
-\end{align*} +\end{align*}" src="form_3877.png"/>

      -

      We will solve this equation on the square, $\Omega=[-1,1]^2$, for which you've already learned how to generate a mesh in step-1 and step-2. In this program, we will also only consider the particular case $f(\mathbf x)=1$ and come back to how to implement the more general case in the next tutorial program, step-4.

      -

      If you've learned about the basics of the finite element method, you will remember the steps we need to take to approximate the solution $u$ by a finite dimensional approximation. Specifically, we first need to derive the weak form of the equation above, which we obtain by multiplying the equation by a test function $\varphi$ from the left (we will come back to the reason for multiplying from the left and not from the right below) and integrating over the domain $\Omega$:

      -\begin{align*}
+<p> We will solve this equation on the square, <picture><source srcset=$\Omega=[-1,1]^2$, for which you've already learned how to generate a mesh in step-1 and step-2. In this program, we will also only consider the particular case $f(\mathbf x)=1$ and come back to how to implement the more general case in the next tutorial program, step-4.

      +

      If you've learned about the basics of the finite element method, you will remember the steps we need to take to approximate the solution $u$ by a finite dimensional approximation. Specifically, we first need to derive the weak form of the equation above, which we obtain by multiplying the equation by a test function $\varphi$ from the left (we will come back to the reason for multiplying from the left and not from the right below) and integrating over the domain $\Omega$:

      +\begin{align*}
   -\int_\Omega \varphi \Delta u = \int_\Omega \varphi f.
-\end{align*} +\end{align*}" src="form_3880.png"/>

      This can be integrated by parts:

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \int_\Omega \nabla\varphi \cdot \nabla u
   -
   \int_{\partial\Omega} \varphi \mathbf{n}\cdot \nabla u
    = \int_\Omega \varphi f.
-\end{align*} +\end{align*}" src="form_3881.png"/>

      -

      The test function $\varphi$ has to satisfy the same kind of boundary conditions (in mathematical terms: it needs to come from the tangent space of the set in which we seek the solution), so on the boundary $\varphi=0$ and consequently the weak form we are looking for reads

      -\begin{align*}
+<p> The test function <picture><source srcset=$\varphi$ has to satisfy the same kind of boundary conditions (in mathematical terms: it needs to come from the tangent space of the set in which we seek the solution), so on the boundary $\varphi=0$ and consequently the weak form we are looking for reads

      +\begin{align*}
   (\nabla\varphi, \nabla u)
    = (\varphi, f),
-\end{align*} +\end{align*}" src="form_3883.png"/>

      -

      where we have used the common notation $(a,b)=\int_\Omega a\; b$. The problem then asks for a function $u$ for which this statement is true for all test functions $\varphi$ from the appropriate space (which here is the space $H^1$).

      -

      Of course we can't find such a function on a computer in the general case, and instead we seek an approximation $u_h(\mathbf x)=\sum_j U_j \varphi_j(\mathbf
-x)$, where the $U_j$ are unknown expansion coefficients we need to determine (the "degrees of freedom" of this problem), and $\varphi_i(\mathbf x)$ are the finite element shape functions we will use. To define these shape functions, we need the following:

      +

      where we have used the common notation $(a,b)=\int_\Omega a\; b$. The problem then asks for a function $u$ for which this statement is true for all test functions $\varphi$ from the appropriate space (which here is the space $H^1$).

      +

      Of course we can't find such a function on a computer in the general case, and instead we seek an approximation $u_h(\mathbf x)=\sum_j U_j \varphi_j(\mathbf
+x)$, where the $U_j$ are unknown expansion coefficients we need to determine (the "degrees of freedom" of this problem), and $\varphi_i(\mathbf x)$ are the finite element shape functions we will use. To define these shape functions, we need the following:

      • A mesh on which to define shape functions. You have already seen how to generate and manipulate the objects that describe meshes in step-1 and step-2.
      • -
      • A finite element that describes the shape functions we want to use on the reference cell (which in deal.II is always the unit interval $[0,1]$, the unit square $[0,1]^2$ or the unit cube $[0,1]^3$, depending on which space dimension you work in). In step-2, we had already used an object of type FE_Q<2>, which denotes the usual Lagrange elements that define shape functions by interpolation on support points. The simplest one is FE_Q<2>(1), which uses polynomial degree 1. In 2d, these are often referred to as bilinear, since they are linear in each of the two coordinates of the reference cell. (In 1d, they would be linear and in 3d tri-linear; however, in the deal.II documentation, we will frequently not make this distinction and simply always call these functions "linear".)
      • +
      • A finite element that describes the shape functions we want to use on the reference cell (which in deal.II is always the unit interval $[0,1]$, the unit square $[0,1]^2$ or the unit cube $[0,1]^3$, depending on which space dimension you work in). In step-2, we had already used an object of type FE_Q<2>, which denotes the usual Lagrange elements that define shape functions by interpolation on support points. The simplest one is FE_Q<2>(1), which uses polynomial degree 1. In 2d, these are often referred to as bilinear, since they are linear in each of the two coordinates of the reference cell. (In 1d, they would be linear and in 3d tri-linear; however, in the deal.II documentation, we will frequently not make this distinction and simply always call these functions "linear".)
      • A DoFHandler object that enumerates all the degrees of freedom on the mesh, taking the reference cell description the finite element object provides as the basis. You've also already seen how to do this in step-2.
      • A mapping that tells how the shape functions on the real cell are obtained from the shape functions defined by the finite element class on the reference cell. By default, unless you explicitly say otherwise, deal.II will use a (bi-, tri-)linear mapping for this, so in most cases you don't have to worry about this step.
      -

      Through these steps, we now have a set of functions $\varphi_i$, and we can define the weak form of the discrete problem: Find a function $u_h$, i.e., find the expansion coefficients $U_j$ mentioned above, so that

      -\begin{align*}
+<p>Through these steps, we now have a set of functions <picture><source srcset=$\varphi_i$, and we can define the weak form of the discrete problem: Find a function $u_h$, i.e., find the expansion coefficients $U_j$ mentioned above, so that

      +\begin{align*}
   (\nabla\varphi_i, \nabla u_h)
    = (\varphi_i, f),
    \qquad\qquad
    i=0\ldots N-1.
-\end{align*} +\end{align*}" src="form_3887.png"/>

      -

      Note that we here follow the convention that everything is counted starting at zero, as common in C and C++. This equation can be rewritten as a linear system if you insert the representation $u_h(\mathbf x)=\sum_j U_j
-\varphi_j(\mathbf x)$ and then observe that

      -\begin{align*}
+<p> Note that we here follow the convention that everything is counted starting at zero, as common in C and C++. This equation can be rewritten as a linear system if you insert the representation <picture><source srcset=$u_h(\mathbf x)=\sum_j U_j
+\varphi_j(\mathbf x)$ and then observe that

      +\begin{align*}
   (\nabla\varphi_i, \nabla u_h)
   &= \left(\nabla\varphi_i, \nabla \Bigl[\sum_j U_j \varphi_j\Bigr]\right)
 \\
   &= \sum_j \left(\nabla\varphi_i, \nabla \left[U_j \varphi_j\right]\right)
 \\
   &= \sum_j \left(\nabla\varphi_i, \nabla \varphi_j \right) U_j.
-\end{align*} +\end{align*}" src="form_3889.png"/>

      With this, the problem reads: Find a vector $U$ so that

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   A U = F,
-\end{align*} +\end{align*}" src="form_3890.png"/>

      -

      where the matrix $A$ and the right hand side $F$ are defined as

      -\begin{align*}
+<p> where the matrix <picture><source srcset=$A$ and the right hand side $F$ are defined as

      +\begin{align*}
   A_{ij} &= (\nabla\varphi_i, \nabla \varphi_j),
   \\
   F_i &= (\varphi_i, f).
-\end{align*} +\end{align*}" src="form_3891.png"/>

      Should we multiply by a test function from the left or from the right?

      Before we move on with describing how these quantities can be computed, note that if we had multiplied the original equation from the right by a test function rather than from the left, then we would have obtained a linear system of the form

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   U^T A = F^T
-\end{align*} +\end{align*}" src="form_3892.png"/>

      -

      with a row vector $F^T$. By transposing this system, this is of course equivalent to solving

      -\begin{align*}
+<p> with a row vector <picture><source srcset=$F^T$. By transposing this system, this is of course equivalent to solving

      +\begin{align*}
   A^T U = F
-\end{align*} +\end{align*}" src="form_3894.png"/>

      -

      which here is the same as above since $A=A^T$. But in general is not, and in order to avoid any sort of confusion, experience has shown that simply getting into the habit of multiplying the equation from the left rather than from the right (as is often done in the mathematical literature) avoids a common class of errors as the matrix is automatically correct and does not need to be transposed when comparing theory and implementation. See step-9 for the first example in this tutorial where we have a non-symmetric bilinear form for which it makes a difference whether we multiply from the right or from the left.

      +

      which here is the same as above since $A=A^T$. But in general is not, and in order to avoid any sort of confusion, experience has shown that simply getting into the habit of multiplying the equation from the left rather than from the right (as is often done in the mathematical literature) avoids a common class of errors as the matrix is automatically correct and does not need to be transposed when comparing theory and implementation. See step-9 for the first example in this tutorial where we have a non-symmetric bilinear form for which it makes a difference whether we multiply from the right or from the left.

      Assembling the matrix and right hand side vector

      -

      Now we know what we need (namely: objects that hold the matrix and vectors, as well as ways to compute $A_{ij},F_i$), and we can look at what it takes to make that happen:

      +

      Now we know what we need (namely: objects that hold the matrix and vectors, as well as ways to compute $A_{ij},F_i$), and we can look at what it takes to make that happen:

        -
      • The object for $A$ is of type SparseMatrix while those for $U$ and $F$ are of type Vector. We will see in the program below what classes are used to solve linear systems.
      • -
      • We need a way to form the integrals. In the finite element method, this is most commonly done using quadrature, i.e. the integrals are replaced by a weighted sum over a set of quadrature points on each cell. That is, we first split the integral over $\Omega$ into integrals over all cells,

        -\begin{align*}
+<li>The object for <picture><source srcset=$A$ is of type SparseMatrix while those for $U$ and $F$ are of type Vector. We will see in the program below what classes are used to solve linear systems.

      • +
      • We need a way to form the integrals. In the finite element method, this is most commonly done using quadrature, i.e. the integrals are replaced by a weighted sum over a set of quadrature points on each cell. That is, we first split the integral over $\Omega$ into integrals over all cells,

        +\begin{align*}
     A_{ij} &= (\nabla\varphi_i, \nabla \varphi_j)
     = \sum_{K \in {\mathbb T}} \int_K \nabla\varphi_i \cdot \nabla \varphi_j,
     \\
     F_i &= (\varphi_i, f)
     = \sum_{K \in {\mathbb T}} \int_K \varphi_i f,
-  \end{align*} + \end{align*}" src="form_3897.png"/>

        and then approximate each cell's contribution by quadrature:

        -\begin{align*}
+<picture><source srcset=\begin{align*}
     A^K_{ij} &=
     \int_K \nabla\varphi_i \cdot \nabla \varphi_j
     \approx
     \sum_q \nabla\varphi_i(\mathbf x^K_q) \cdot \nabla \varphi_j(\mathbf x^K_q) w^K_q,
     \\
     F^K_i &=
     \int_K \varphi_i f
     \approx
     \sum_q \varphi_i(\mathbf x^K_q) f(\mathbf x^K_q) w^K_q,
  \end{align*}

        - where $\mathbb{T} \approx \Omega$ is a Triangulation approximating the domain, $\mathbf x^K_q$ is the $q$th quadrature point on cell $K$, and $w^K_q$ the $q$th quadrature weight. There are different parts to what is needed in doing this, and we will discuss them in turn next.
      • -
      • First, we need a way to describe the location $\mathbf x_q^K$ of quadrature points and their weights $w^K_q$. They are usually mapped from the reference cell in the same way as shape functions, i.e., implicitly using the MappingQ1 class or, if you explicitly say so, through one of the other classes derived from Mapping. The locations and weights on the reference cell are described by objects derived from the Quadrature base class. Typically, one chooses a quadrature formula (i.e. a set of points and weights) so that the quadrature exactly equals the integral in the matrix; this can be achieved because all factors in the integral are polynomial, and is done by Gaussian quadrature formulas, implemented in the QGauss class.
      • -
      • We then need something that can help us evaluate $\varphi_i(\mathbf x^K_q)$ on cell $K$. This is what the FEValues class does: it takes a finite element objects to describe $\varphi$ on the reference cell, a quadrature object to describe the quadrature points and weights, and a mapping object (or implicitly takes the MappingQ1 class) and provides values and derivatives of the shape functions on the real cell $K$ as well as all sorts of other information needed for integration, at the quadrature points located on $K$.
      • + where $\mathbb{T} \approx \Omega$ is a Triangulation approximating the domain, $\mathbf x^K_q$ is the $q$th quadrature point on cell $K$, and $w^K_q$ the $q$th quadrature weight. There are different parts to what is needed in doing this, and we will discuss them in turn next. +
      • First, we need a way to describe the location $\mathbf x_q^K$ of quadrature points and their weights $w^K_q$. They are usually mapped from the reference cell in the same way as shape functions, i.e., implicitly using the MappingQ1 class or, if you explicitly say so, through one of the other classes derived from Mapping. The locations and weights on the reference cell are described by objects derived from the Quadrature base class. Typically, one chooses a quadrature formula (i.e. a set of points and weights) so that the quadrature exactly equals the integral in the matrix; this can be achieved because all factors in the integral are polynomial, and is done by Gaussian quadrature formulas, implemented in the QGauss class.
      • +
      • We then need something that can help us evaluate $\varphi_i(\mathbf x^K_q)$ on cell $K$. This is what the FEValues class does: it takes a finite element object to describe $\varphi$ on the reference cell, a quadrature object to describe the quadrature points and weights, and a mapping object (or implicitly takes the MappingQ1 class) and provides values and derivatives of the shape functions on the real cell $K$ as well as all sorts of other information needed for integration, at the quadrature points located on $K$.

      The process of computing the matrix and right hand side as a sum over all cells (and then a sum over quadrature points) is usually called assembling the linear system, or assembly for short, using the meaning of the word related to assembly line, meaning "the act of putting together a set of pieces, fragments, or elements".

      FEValues really is the central class in the assembly process. One way you can view it is as follows: The FiniteElement and derived classes describe shape functions, i.e., infinite dimensional objects: functions have values at every point. We need this for theoretical reasons because we want to perform our analysis with integrals over functions. However, for a computer, this is a very difficult concept, since they can in general only deal with a finite amount of information, and so we replace integrals by sums over quadrature points that we obtain by mapping (the Mapping object) using points defined on a reference cell (the Quadrature object) onto points on the real cell. In essence, we reduce the problem to one where we only need a finite amount of information, namely shape function values and derivatives, quadrature weights, normal vectors, etc, exclusively at a finite set of points. The FEValues class is the one that brings the three components together and provides this finite set of information on a particular cell $K$. You will see it in action when we assemble the linear system below.

      @@ -250,17 +250,17 @@

      The final piece of this introduction is to mention that after a linear system is obtained, it is solved using an iterative solver and then postprocessed: we create an output file using the DataOut class that can then be visualized using one of the common visualization programs.

      Note
      The preceding overview of all the important steps of any finite element implementation has its counterpart in deal.II: The library can naturally be grouped into a number of "modules" that cover the basic concepts just outlined. You can access these modules through the tab at the top of this page. An overview of the most fundamental groups of concepts is also available on the front page of the deal.II manual.

      Solving the linear system

      -

      For a finite element program, the linear system we end up with here is relatively small: The matrix has size $1089 \times 1089$, owing to the fact that the mesh we use is $32\times 32$ and so there are $33^2=1089$ vertices in the mesh. In many of the later tutorial programs, matrix sizes in the range of tens of thousands to hundreds of thousands will not be uncommon, and with codes such as ASPECT that build on deal.II, we regularly solve problems with more than a hundred million equations (albeit using parallel computers). In any case, even for the small system here, the matrix is much larger than what one typically encounters in an undergraduate or most graduate courses, and so the question arises how we can solve such linear systems.

      -

      The first method one typically learns for solving linear systems is Gaussian elimination. The problem with this method is that it requires a number of operations that is proportional to $N^3$, where $N$ is the number of equations or unknowns in the linear system – more specifically, the number of operations is $\frac 23 N^3$, give or take a few. With $N=1089$, this means that we would have to do around $861$ million operations. This is a number that is quite feasible and it would take modern processors less than 0.1 seconds to do this. But it is clear that this isn't going to scale: If we have twenty times as many equations in the linear system (that is, twenty times as many unknowns), then it would already take 1000-10,000 seconds or on the order of an hour. Make the linear system another ten times larger, and it is clear that we can not solve it any more on a single computer.

      +

      For a finite element program, the linear system we end up with here is relatively small: The matrix has size $1089 \times 1089$, owing to the fact that the mesh we use is $32\times 32$ and so there are $33^2=1089$ vertices in the mesh. In many of the later tutorial programs, matrix sizes in the range of tens of thousands to hundreds of thousands will not be uncommon, and with codes such as ASPECT that build on deal.II, we regularly solve problems with more than a hundred million equations (albeit using parallel computers). In any case, even for the small system here, the matrix is much larger than what one typically encounters in an undergraduate or most graduate courses, and so the question arises how we can solve such linear systems.

      +

      The first method one typically learns for solving linear systems is Gaussian elimination. The problem with this method is that it requires a number of operations that is proportional to $N^3$, where $N$ is the number of equations or unknowns in the linear system – more specifically, the number of operations is $\frac 23 N^3$, give or take a few. With $N=1089$, this means that we would have to do around $861$ million operations. This is a number that is quite feasible and it would take modern processors less than 0.1 seconds to do this. But it is clear that this isn't going to scale: If we have twenty times as many equations in the linear system (that is, twenty times as many unknowns), then it would already take 1000-10,000 seconds or on the order of an hour. Make the linear system another ten times larger, and it is clear that we can not solve it any more on a single computer.

      One can rescue the situation somewhat by realizing that only a relatively small number of entries in the matrix are nonzero – that is, the matrix is sparse. Variations of Gaussian elimination can exploit this, making the process substantially faster; we will use one such method – implemented in the SparseDirectUMFPACK class – in step-29 for the first time, among several others than come after that. These variations of Gaussian elimination might get us to problem sizes on the order of 100,000 or 200,000, but not all that much beyond that.

      -

      Instead, what we will do here is take up an idea from 1952: the Conjugate Gradient method, or in short "CG". CG is an "iterative" solver in that it forms a sequence of vectors that converge to the exact solution; in fact, after $N$ such iterations in the absence of roundoff errors it finds the exact solution if the matrix is symmetric and positive definite. The method was originally developed as another way to solve a linear system exactly, like Gaussian elimination, but as such it had few advantages and was largely forgotten for a few decades. But, when computers became powerful enough to solve problems of a size where Gaussian elimination doesn't work well any more (sometime in the 1980s), CG was rediscovered as people realized that it is well suited for large and sparse systems like the ones we get from the finite element method. This is because (i) the vectors it computes converge to the exact solution, and consequently we do not actually have to do all $N$ iterations to find the exact solution as long as we're happy with reasonably good approximations; and (ii) it only ever requires matrix-vector products, which is very useful for sparse matrices because a sparse matrix has, by definition, only ${\cal O}(N)$ entries and so a matrix-vector product can be done with ${\cal O}(N)$ effort whereas it costs $N^2$ operations to do the same for dense matrices. As a consequence, we can hope to solve linear systems with at most ${\cal O}(N^2)$ operations, and in many cases substantially fewer.

      +

      Instead, what we will do here is take up an idea from 1952: the Conjugate Gradient method, or in short "CG". CG is an "iterative" solver in that it forms a sequence of vectors that converge to the exact solution; in fact, after $N$ such iterations in the absence of roundoff errors it finds the exact solution if the matrix is symmetric and positive definite. The method was originally developed as another way to solve a linear system exactly, like Gaussian elimination, but as such it had few advantages and was largely forgotten for a few decades. But, when computers became powerful enough to solve problems of a size where Gaussian elimination doesn't work well any more (sometime in the 1980s), CG was rediscovered as people realized that it is well suited for large and sparse systems like the ones we get from the finite element method. This is because (i) the vectors it computes converge to the exact solution, and consequently we do not actually have to do all $N$ iterations to find the exact solution as long as we're happy with reasonably good approximations; and (ii) it only ever requires matrix-vector products, which is very useful for sparse matrices because a sparse matrix has, by definition, only ${\cal O}(N)$ entries and so a matrix-vector product can be done with ${\cal O}(N)$ effort whereas it costs $N^2$ operations to do the same for dense matrices. As a consequence, we can hope to solve linear systems with at most ${\cal O}(N^2)$ operations, and in many cases substantially fewer.

      Finite element codes therefore almost always use iterative solvers such as CG for the solution of the linear systems, and we will do so in this code as well. (We note that the CG method is only usable for matrices that are symmetric and positive definite; for other equations, the matrix may not have these properties and we will have to use other variations of iterative solvers such as BiCGStab or GMRES that are applicable to more general matrices.)

      -

      An important component of these iterative solvers is that we specify the tolerance with which we want to solve the linear system – in essence, a statement about the error we are willing to accept in our approximate solution. The error in an approximate solution $\tilde x$ obtained to the exact solution $x$ of a linear system $Ax=b$ is defined as $\|x-\tilde x\|$, but this is a quantity we cannot compute because we don't know the exact solution $x$. Instead, we typically consider the residual, defined as $\|b-A\tilde x\|=\|A(x-\tilde x)\|$, as a computable measure. We then let the iterative solver compute more and more accurate solutions $\tilde x$, until $\|b-A\tilde x\|\le \tau$. A practical question is what value $\tau$ should have. In most applications, setting

      -\begin{align*}
+ An important component of these iterative solvers is that we specify the tolerance with which we want to solve the linear system – in essence, a statement about the error we are willing to accept in our approximate solution. The error in an approximate solution $\tilde x$ obtained to the exact solution $x$ of a linear system $Ax=b$ is defined as $\|x-\tilde x\|$, but this is a quantity we cannot compute because we don't know the exact solution $x$. Instead, we typically consider the residual, defined as $\|b-A\tilde x\|=\|A(x-\tilde x)\|$, as a computable measure. We then let the iterative solver compute more and more accurate solutions $\tilde x$, until $\|b-A\tilde x\|\le \tau$. A practical question is what value $\tau$ should have. In most applications, setting

      +\begin{align*}
   \tau = 10^{-6} \|b\|
\end{align*}

      -

      is a reasonable choice. The fact that we make $\tau$ proportional to the size (norm) of $b$ makes sure that our expectations of the accuracy in the solution are relative to the size of the solution. This makes sense: If we make the right hand side $b$ ten times larger, then the solution $x$ of $Ax=b$ will also be ten times larger, and so will $\tilde x$; we want the same number of accurate digits in $\tilde x$ as before, which means that we should also terminate when the residual $\|b-A\tilde x\|$ is ten times the original size – which is exactly what we get if we make $\tau$ proportional to $\|b\|$.

      +

      is a reasonable choice. The fact that we make $\tau$ proportional to the size (norm) of $b$ makes sure that our expectations of the accuracy in the solution are relative to the size of the solution. This makes sense: If we make the right hand side $b$ ten times larger, then the solution $x$ of $Ax=b$ will also be ten times larger, and so will $\tilde x$; we want the same number of accurate digits in $\tilde x$ as before, which means that we should also terminate when the residual $\|b-A\tilde x\|$ is ten times the original size – which is exactly what we get if we make $\tau$ proportional to $\|b\|$.

      All of this will be implemented in the Step3::solve() function in this program. As you will see, it is quite simple to set up linear solvers with deal.II: The whole function will have only three lines.

      About the implementation

      Although this is the simplest possible equation you can solve using the finite element method, this program shows the basic structure of most finite element programs and also serves as the template that almost all of the following programs will essentially follow. Specifically, the main class of this program looks like this:

      class Step3
      @@ -302,17 +302,17 @@
    3. assemble_system(): This, then is where the contents of the matrix and right hand side are computed, as discussed at length in the introduction above. Since doing something with this linear system is conceptually very different from computing its entries, we separate it from the following function.
    4. -solve(): This then is the function in which we compute the solution $U$ of the linear system $AU=F$. In the current program, this is a simple task since the matrix is so simple, but it will become a significant part of a program's size whenever the problem is not so trivial any more (see, for example, step-20, step-22, or step-31 once you've learned a bit more about the library).
    5. +solve(): This then is the function in which we compute the solution $U$ of the linear system $AU=F$. In the current program, this is a simple task since the matrix is so simple, but it will become a significant part of a program's size whenever the problem is not so trivial any more (see, for example, step-20, step-22, or step-31 once you've learned a bit more about the library).
    6. output_results(): Finally, when you have computed a solution, you probably want to do something with it. For example, you may want to output it in a format that can be visualized, or you may want to compute quantities you are interested in: say, heat fluxes in a heat exchanger, air friction coefficients of a wing, maximum bridge loads, or simply the value of the numerical solution at a point. This function is therefore the place for postprocessing your solution.
    7. All of this is held together by the single public function (other than the constructor), namely the run() function. It is the one that is called from the place where an object of this type is created, and it is the one that calls all the other functions in their proper order. Encapsulating this operation into the run() function, rather than calling all the other functions from main() makes sure that you can change how the separation of concerns within this class is implemented. For example, if one of the functions becomes too big, you can split it up into two, and the only places you have to be concerned about changing as a consequence are within this very same class, and not anywhere else.

      As mentioned above, you will see this general structure — sometimes with variants in spelling of the functions' names, but in essentially this order of separation of functionality — again in many of the following tutorial programs.

      A note on types

      -

      deal.II defines a number of integral types via alias in namespace types. (In the previous sentence, the word "integral" is used as the adjective that corresponds to the noun "integer". It shouldn't be confused with the noun "integral" that represents the area or volume under a curve or surface. The adjective "integral" is widely used in the C++ world in contexts such as "integral type", "integral constant", etc.) In particular, in this program you will see types::global_dof_index in a couple of places: an integer type that is used to denote the global index of a degree of freedom, i.e., the index of a particular degree of freedom within the DoFHandler object that is defined on top of a triangulation (as opposed to the index of a particular degree of freedom within a particular cell). For the current program (as well as almost all of the tutorial programs), you will have a few thousand to maybe a few million unknowns globally (and, for $Q_1$ elements, you will have 4 locally on each cell in 2d and 8 in 3d). Consequently, a data type that allows to store sufficiently large numbers for global DoF indices is unsigned int given that it allows to store numbers between 0 and slightly more than 4 billion (on most systems, where integers are 32-bit). In fact, this is what types::global_dof_index is.

      +

      deal.II defines a number of integral types via alias in namespace types. (In the previous sentence, the word "integral" is used as the adjective that corresponds to the noun "integer". It shouldn't be confused with the noun "integral" that represents the area or volume under a curve or surface. The adjective "integral" is widely used in the C++ world in contexts such as "integral type", "integral constant", etc.) In particular, in this program you will see types::global_dof_index in a couple of places: an integer type that is used to denote the global index of a degree of freedom, i.e., the index of a particular degree of freedom within the DoFHandler object that is defined on top of a triangulation (as opposed to the index of a particular degree of freedom within a particular cell). For the current program (as well as almost all of the tutorial programs), you will have a few thousand to maybe a few million unknowns globally (and, for $Q_1$ elements, you will have 4 locally on each cell in 2d and 8 in 3d). Consequently, a data type that allows to store sufficiently large numbers for global DoF indices is unsigned int given that it allows to store numbers between 0 and slightly more than 4 billion (on most systems, where integers are 32-bit). In fact, this is what types::global_dof_index is.

      So, why not just use unsigned int right away? deal.II used to do this until version 7.3. However, deal.II supports very large computations (via the framework discussed in step-40) that may have more than 4 billion unknowns when spread across a few thousand processors. Consequently, there are situations where unsigned int is not sufficiently large and we need a 64-bit unsigned integral type. To make this possible, we introduced types::global_dof_index which by default is defined as simply unsigned int whereas it is possible to define it as unsigned long long int if necessary, by passing a particular flag during configuration (see the ReadMe file).

      This covers the technical aspect. But there is also a documentation purpose: everywhere in the library and codes that are built on it, if you see a place using the data type types::global_dof_index, you immediately know that the quantity that is being referenced is, in fact, a global dof index. No such meaning would be apparent if we had just used unsigned int (which may also be a local index, a boundary indicator, a material id, etc.). Immediately knowing what a variable refers to also helps avoid errors: it's quite clear that there must be a bug if you see an object of type types::global_dof_index being assigned to variable of type types::subdomain_id, even though they are both represented by unsigned integers and the compiler will, consequently, not complain.

      -

      In more practical terms what the presence of this type means is that during assembly, we create a $4\times 4$ matrix (in 2d, using a $Q_1$ element) of the contributions of the cell we are currently sitting on, and then we need to add the elements of this matrix to the appropriate elements of the global (system) matrix. For this, we need to get at the global indices of the degrees of freedom that are local to the current cell, for which we will always use the following piece of the code:

      cell->get_dof_indices (local_dof_indices);
      +

      In more practical terms what the presence of this type means is that during assembly, we create a $4\times 4$ matrix (in 2d, using a $Q_1$ element) of the contributions of the cell we are currently sitting on, and then we need to add the elements of this matrix to the appropriate elements of the global (system) matrix. For this, we need to get at the global indices of the degrees of freedom that are local to the current cell, for which we will always use the following piece of the code:

      cell->get_dof_indices (local_dof_indices);
      /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_30.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1884)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_30.html 2023-11-25 15:26:01.766518579 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_30.html 2023-11-25 15:26:01.766518579 +0100 @@ -216,9 +216,9 @@

      Motivation

      Adaptive local refinement is used to obtain fine meshes which are well adapted to solving the problem at hand efficiently. In short, the size of cells which produce a large error is reduced to obtain a better approximation of the solution to the problem at hand. However, a lot of problems contain anisotropic features. Prominent examples are shocks or boundary layers in compressible viscous flows. An efficient mesh approximates these features with cells of higher aspect ratio which are oriented according to the mentioned features. Using only isotropic refinement, the aspect ratios of the original mesh cells are preserved, as they are inherited by the children of a cell. Thus, starting from an isotropic mesh, a boundary layer will be refined in order to catch the rapid variation of the flow field in the wall normal direction, thus leading to cells with very small edge lengths both in normal and tangential direction. Usually, much higher edge lengths in tangential direction and thus significantly less cells could be used without a significant loss in approximation accuracy. An anisotropic refinement process can modify the aspect ratio from mother to child cells by a factor of two for each refinement step. In the course of several refinements, the aspect ratio of the fine cells can be optimized, saving a considerable number of cells and correspondingly degrees of freedom and thus computational resources, memory as well as CPU time.

      Implementation

      -

      Most of the time, when we do finite element computations, we only consider one cell at a time, for example to calculate cell contributions to the global matrix, or to interpolate boundary values. However, sometimes we have to look at how cells are related in our algorithms. Relationships between cells come in two forms: neighborship and mother-child relationship. For the case of isotropic refinement, deal.II uses certain conventions (invariants) for cell relationships that are always maintained. For example, a refined cell always has exactly $2^{dim}$ children. And (except for the 1d case), two neighboring cells may differ by at most one refinement level: they are equally often refined or one of them is exactly once more refined, leaving exactly one hanging node on the common face. Almost all of the time these invariants are only of concern in the internal implementation of the library. However, there are cases where knowledge of them is also relevant to an application program.

      -

      In the current context, it is worth noting that the kind of mesh refinement affects some of the most fundamental assumptions. Consequently, some of the usual code found in application programs will need modifications to exploit the features of meshes which were created using anisotropic refinement. For those interested in how deal.II evolved, it may be of interest that the loosening of such invariants required some incompatible changes. For example, the library used to have a member GeometryInfo<dim>::children_per_cell that specified how many children a cell has once it is refined. For isotropic refinement, this number is equal to $2^{dim}$, as mentioned above. However, for anisotropic refinement, this number does not exist, as is can be either two or four in 2D and two, four or eight in 3D, and the member GeometryInfo<dim>::children_per_cell has consequently been removed. It has now been replaced by GeometryInfo<dim>::max_children_per_cell which specifies the maximum number of children a cell can have. How many children a refined cell has was previously available as static information, but now it depends on the actual refinement state of a cell and can be retrieved using TriaAccessor::n_children(), a call that works equally well for both isotropic and anisotropic refinement. A very similar situation can be found for faces and their subfaces: the pertinent information can be queried using GeometryInfo<dim>::max_children_per_face or face->n_children(), depending on the context.

      -

      Another important aspect, and the most important one in this tutorial, is the treatment of neighbor-relations when assembling jump terms on the faces between cells. Looking at the documentation of the assemble_system functions in step-12 we notice, that we need to decide if a neighboring cell is coarser, finer or on the same (refinement) level as our current cell. These decisions do not work in the same way for anisotropic refinement as the information given by the level of a cell is not enough to completely characterize anisotropic cells; for example, are the terminal children of a two-dimensional cell that is first cut in $x$-direction and whose children are then cut in $y$-direction on level 2, or are they on level 1 as they would be if the cell would have been refined once isotropically, resulting in the same set of finest cells?

      +

      Most of the time, when we do finite element computations, we only consider one cell at a time, for example to calculate cell contributions to the global matrix, or to interpolate boundary values. However, sometimes we have to look at how cells are related in our algorithms. Relationships between cells come in two forms: neighborship and mother-child relationship. For the case of isotropic refinement, deal.II uses certain conventions (invariants) for cell relationships that are always maintained. For example, a refined cell always has exactly $2^{dim}$ children. And (except for the 1d case), two neighboring cells may differ by at most one refinement level: they are equally often refined or one of them is exactly once more refined, leaving exactly one hanging node on the common face. Almost all of the time these invariants are only of concern in the internal implementation of the library. However, there are cases where knowledge of them is also relevant to an application program.

      +

      In the current context, it is worth noting that the kind of mesh refinement affects some of the most fundamental assumptions. Consequently, some of the usual code found in application programs will need modifications to exploit the features of meshes which were created using anisotropic refinement. For those interested in how deal.II evolved, it may be of interest that the loosening of such invariants required some incompatible changes. For example, the library used to have a member GeometryInfo<dim>::children_per_cell that specified how many children a cell has once it is refined. For isotropic refinement, this number is equal to $2^{dim}$, as mentioned above. However, for anisotropic refinement, this number does not exist, as it can be either two or four in 2D and two, four or eight in 3D, and the member GeometryInfo<dim>::children_per_cell has consequently been removed. It has now been replaced by GeometryInfo<dim>::max_children_per_cell which specifies the maximum number of children a cell can have. How many children a refined cell has was previously available as static information, but now it depends on the actual refinement state of a cell and can be retrieved using TriaAccessor::n_children(), a call that works equally well for both isotropic and anisotropic refinement. A very similar situation can be found for faces and their subfaces: the pertinent information can be queried using GeometryInfo<dim>::max_children_per_face or face->n_children(), depending on the context.

      +

      Another important aspect, and the most important one in this tutorial, is the treatment of neighbor-relations when assembling jump terms on the faces between cells. Looking at the documentation of the assemble_system functions in step-12 we notice, that we need to decide if a neighboring cell is coarser, finer or on the same (refinement) level as our current cell. These decisions do not work in the same way for anisotropic refinement as the information given by the level of a cell is not enough to completely characterize anisotropic cells; for example, are the terminal children of a two-dimensional cell that is first cut in $x$-direction and whose children are then cut in $y$-direction on level 2, or are they on level 1 as they would be if the cell would have been refined once isotropically, resulting in the same set of finest cells?

      After anisotropic refinement, a coarser neighbor is not necessarily exactly one level below ours, but can pretty much have any level relative to the current one; in fact, it can even be on a higher level even though it is coarser. Thus the decisions have to be made on a different basis, whereas the intention of the decisions stays the same.

      In the following, we will discuss the cases that can happen when we want to compute contributions to the matrix (or right hand side) of the form

      \[
@@ -229,7 +229,7 @@
 <ul>
 <li>
 <p class=Finer neighbor: If we are on an active cell and want to integrate over a face $f\subset \partial K$, the first possibility is that the neighbor behind this face is more refined, i.e. has children occupying only part of the common face. In this case, the face under consideration has to be a refined one, which we can determine by asking if (face->has_children()). If this is true, we need to loop over all subfaces and get the neighbors' child behind this subface, so that we can reinit an FEFaceValues object with the neighbor and an FESubfaceValues object with our cell and the respective subface.

      -

      For isotropic refinement, this kind is reasonably simple because we know that an invariant of the isotropically refined adaptive meshes in deal.II is that neighbors can only differ by exactly one refinement level. However, this isn't quite true any more for anisotropically refined meshes, in particular in 3d; there, the active cell we are interested in on the other side of $f$ might not actually be a child of our neighbor, but perhaps a grandchild or even a farther offspring. Fortunately, this complexity is hidden in the internals of the library. All we need to do is call the CellAccessor::neighbor_child_on_subface() function. Still, in 3D there are two cases which need special consideration:

        +

        For isotropic refinement, this kind is reasonably simple because we know that an invariant of the isotropically refined adaptive meshes in deal.II is that neighbors can only differ by exactly one refinement level. However, this isn't quite true any more for anisotropically refined meshes, in particular in 3d; there, the active cell we are interested in on the other side of $f$ might not actually be a child of our neighbor, but perhaps a grandchild or even a farther offspring. Fortunately, this complexity is hidden in the internals of the library. All we need to do is call the CellAccessor::neighbor_child_on_subface() function. Still, in 3D there are two cases which need special consideration:

        • If the neighbor is refined more than once anisotropically, it might be that here are not two or four but actually three subfaces to consider. Imagine the following refinement process of the (two-dimensional) face of the (three-dimensional) neighbor cell we are considering: first the face is refined along x, later on only the left subface is refined along y.

          -------* *---*---* *---*---*
          | | | | | | | |
          @@ -251,7 +251,7 @@
          # # # + + +
          # ## + ++
          ############# +++++++++++++
          -

          Here, the left two cells resulted from an anisotropic bisection of the mother cell in $y$-direction, whereas the right four cells resulted from a simultaneous anisotropic refinement in both the $y$- and $z$-directions. The left cell marked with # has two finer neighbors marked with +, but the actual neighbor of the left cell is the complete right mother cell, as the two cells marked with + are finer and their direct mother is the one large cell.

          +

      Here, the left two cells resulted from an anisotropic bisection of the mother cell in $y$-direction, whereas the right four cells resulted from a simultaneous anisotropic refinement in both the $y$- and $z$-directions. The left cell marked with # has two finer neighbors marked with +, but the actual neighbor of the left cell is the complete right mother cell, as the two cells marked with + are finer and their direct mother is the one large cell.

      However, fortunately, CellAccessor::neighbor_child_on_subface() takes care of these situations by itself, if you loop over the correct number of subfaces, in the above example this is two. The FESubfaceValues<dim>::reinit function takes care of this too, so that the resulting state is always correct. There is one little caveat, however: For reiniting the neighbors FEFaceValues object you need to know the index of the face that points toward the current cell. Usually you assume that the neighbor you get directly is as coarse or as fine as you, if it has children, thus this information can be obtained with CellAccessor::neighbor_of_neighbor(). If the neighbor is coarser, however, you would have to use the first value in CellAccessor::neighbor_of_coarser_neighbor() instead. In order to make this easy for you, there is CellAccessor::neighbor_face_no() which does the correct thing for you and returns the desired result.

      @@ -292,12 +292,12 @@

    This approach is similar to the one we have used in step-27 for hp-refinement and has the great advantage of flexibility: Any error indicator can be used in the anisotropic process, i.e. if you have quite involved a posteriori goal-oriented error indicators available you can use them as easily as a simple Kelly error estimator. The anisotropic part of the refinement process is not influenced by this choice. Furthermore, simply leaving out the third and fourth steps leads to the same isotropic refinement you used to get before any anisotropic changes in deal.II or your application program. As a last advantage, working only on cells flagged for refinement results in a faster evaluation of the anisotropic indicator, which can become noticeable on finer meshes with a lot of cells if the indicator is quite involved.

    Here, we use a very simple approach which is only applicable to DG methods. The general idea is quite simple: DG methods allow the discrete solution to jump over the faces of a cell, whereas it is smooth within each cell. Of course, in the limit we expect that the jumps tend to zero as we refine the mesh and approximate the true solution better and better. Thus, a large jump across a given face indicates that the cell should be refined (at least) orthogonally to that face, whereas a small jump does not lead to this conclusion. It is possible, of course, that the exact solution is not smooth and that it also features a jump. In that case, however, a large jump over one face indicates, that this face is more or less parallel to the jump and in the vicinity of it, thus again we would expect a refinement orthogonal to the face under consideration to be effective.

    -

    The proposed indicator calculates the average jump $K_j$, i.e. the mean value of the absolute jump $|[u]|$ of the discrete solution $u$ over the two faces $f_i^j$, $i=1,2$, $j=1..d$ orthogonal to coordinate direction $j$ on the unit cell.

    +

    The proposed indicator calculates the average jump $K_j$, i.e. the mean value of the absolute jump $|[u]|$ of the discrete solution $u$ over the two faces $f_i^j$, $i=1,2$, $j=1..d$ orthogonal to coordinate direction $j$ on the unit cell.

    \[
 K_j = \frac{\sum_{i=1}^2 \int_{f_i^j}|[u]| dx}{\sum_{i=1}^2 |f_i^j|} .
 \]

    -

    If the average jump in one direction is larger than the average of the jumps in the other directions by a certain factor $\kappa$, i.e. if $K_i > \kappa \frac 1{d-1} \sum_{j=1, j\neq i}^d K_j$, the cell is refined only along that particular direction $i$, otherwise the cell is refined isotropically.

    +

    If the average jump in one direction is larger than the average of the jumps in the other directions by a certain factor $\kappa$, i.e. if $K_i > \kappa \frac 1{d-1} \sum_{j=1, j\neq i}^d K_j$, the cell is refined only along that particular direction $i$, otherwise the cell is refined isotropically.

    Such a criterion is easily generalized to systems of equations: the absolute value of the jump would be replaced by an appropriate norm of the vector-valued jump.

    The problem

    We solve the linear transport equation presented in step-12. The domain is extended to cover $[-1,1]\times[0,1]$ in 2D, where the flow field $\beta$ describes a counterclockwise quarter circle around the origin in the right half of the domain and is parallel to the x-axis in the left part of the domain. The inflow boundary is again located at $x=1$ and along the positive part of the x-axis, and the boundary conditions are chosen as in step-12.

    @@ -381,7 +381,7 @@
    Definition: point.h:112
    #define AssertDimension(dim1, dim2)
    Definition: exceptions.h:1787
    -

    The flow field is chosen to be a quarter circle with counterclockwise flow direction and with the origin as midpoint for the right half of the domain with positive $x$ values, whereas the flow simply goes to the left in the left part of the domain at a velocity that matches the one coming in from the right. In the circular part the magnitude of the flow velocity is proportional to the distance from the origin. This is a difference to step-12, where the magnitude was 1 everywhere. The new definition leads to a linear variation of $\beta$ along each given face of a cell. On the other hand, the solution $u(x,y)$ is exactly the same as before.

    +

    The flow field is chosen to be a quarter circle with counterclockwise flow direction and with the origin as midpoint for the right half of the domain with positive $x$ values, whereas the flow simply goes to the left in the left part of the domain at a velocity that matches the one coming in from the right. In the circular part the magnitude of the flow velocity is proportional to the distance from the origin. This is a difference to step-12, where the magnitude was 1 everywhere. The new definition leads to a linear variation of $\beta$ along each given face of a cell. On the other hand, the solution $u(x,y)$ is exactly the same as before.

      void value_list(const std::vector<Point<dim>> &points,
      std::vector<Point<dim>> & values) const
      {
    @@ -1335,7 +1335,7 @@

    We see that the solution on the anisotropically refined mesh is very similar to the solution obtained on the isotropically refined mesh. Thus the anisotropic indicator seems to effectively select the appropriate cells for anisotropic refinement.

    -

    The pictures also explain why the mesh is refined as it is. In the whole left part of the domain refinement is only performed along the $y$-axis of cells. In the right part of the domain the refinement is dominated by isotropic refinement, as the anisotropic feature of the solution - the jump from one to zero - is not well aligned with the mesh where the advection direction takes a turn. However, at the bottom and closest (to the observer) parts of the quarter circle this jump again becomes more and more aligned with the mesh and the refinement algorithm reacts by creating anisotropic cells of increasing aspect ratio.

    +

    The pictures also explain why the mesh is refined as it is. In the whole left part of the domain refinement is only performed along the $y$-axis of cells. In the right part of the domain the refinement is dominated by isotropic refinement, as the anisotropic feature of the solution - the jump from one to zero - is not well aligned with the mesh where the advection direction takes a turn. However, at the bottom and closest (to the observer) parts of the quarter circle this jump again becomes more and more aligned with the mesh and the refinement algorithm reacts by creating anisotropic cells of increasing aspect ratio.

    It might seem that the necessary alignment of anisotropic features and the coarse mesh can decrease performance significantly for real world problems. That is not wrong in general: If one were, for example, to apply anisotropic refinement to problems in which shocks appear (e.g., the equations solved in step-69), then in many cases the shock is not aligned with the mesh and anisotropic refinement will help little unless one also introduces techniques to move the mesh in alignment with the shocks. On the other hand, many steep features of solutions are due to boundary layers. In those cases, the mesh is already aligned with the anisotropic features because it is of course aligned with the boundary itself, and anisotropic refinement will almost always increase the efficiency of computations on adapted grids for these cases.

    The plain program

    /* ---------------------------------------------------------------------
    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_31.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (2241)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_31.html 2023-11-25 15:26:01.819850828 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_31.html 2023-11-25 15:26:01.819850828 +0100 @@ -172,7 +172,7 @@

    The Boussinesq equations

    This program deals with an interesting physical problem: how does a fluid (i.e., a liquid or gas) behave if it experiences differences in buoyancy caused by temperature differences? It is clear that those parts of the fluid that are hotter (and therefore lighter) are going to rise up and those that are cooler (and denser) are going to sink down with gravity.

    In cases where the fluid moves slowly enough such that inertial effects can be neglected, the equations that describe such behavior are the Boussinesq equations that read as follows:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   -\rho\; \beta \; T\; \mathbf{g},
   \\
@@ -183,49 +183,49 @@
   {\mathbf u} \cdot \nabla T
   -
   \nabla \cdot \kappa \nabla T &=& \gamma.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3945.png"/>

    -

    These equations fall into the class of vector-valued problems (a toplevel overview of this topic can be found in the Handling vector valued problems module). Here, $\mathbf u$ is the velocity field, $p$ the pressure, and $T$ the temperature of the fluid. $\varepsilon ({\mathbf u}) = \frac 12
-[(\nabla{\mathbf u}) + (\nabla {\mathbf u})^T]$ is the symmetric gradient of the velocity. As can be seen, velocity and pressure solve a Stokes equation describing the motion of an incompressible fluid, an equation we have previously considered in step-22; we will draw extensively on the experience we have gained in that program, in particular with regard to efficient linear Stokes solvers.

    -

    The forcing term of the fluid motion is the buoyancy of the fluid, expressed as the product of the density $\rho$, the thermal expansion coefficient $\beta$, the temperature $T$ and the gravity vector $\mathbf{g}$ pointing downward. (A derivation of why the right hand side looks like it looks is given in the introduction of step-32.) While the first two equations describe how the fluid reacts to temperature differences by moving around, the third equation states how the fluid motion affects the temperature field: it is an advection diffusion equation, i.e., the temperature is attached to the fluid particles and advected along in the flow field, with an additional diffusion (heat conduction) term. In many applications, the diffusion coefficient is fairly small, and the temperature equation is in fact transport, not diffusion dominated and therefore in character more hyperbolic than elliptic; we will have to take this into account when developing a stable discretization.

    -

    In the equations above, the term $\gamma$ on the right hand side denotes the heat sources and may be a spatially and temporally varying function. $\eta$ and $\kappa$ denote the viscosity and diffusivity coefficients, which we assume constant for this tutorial program. The more general case when $\eta$ depends on the temperature is an important factor in physical applications: Most materials become more fluid as they get hotter (i.e., $\eta$ decreases with $T$); sometimes, as in the case of rock minerals at temperatures close to their melting point, $\eta$ may change by orders of magnitude over the typical range of temperatures.

    -

    We note that the Stokes equation above could be nondimensionalized by introducing the Rayleigh number $\mathrm{Ra}=\frac{\|\mathbf{g}\| \beta \rho}{\eta \kappa} \delta T L^3$ using a typical length scale $L$, typical temperature difference $\delta T$, density $\rho$, thermal diffusivity $\eta$, and thermal conductivity $\kappa$. $\mathrm{Ra}$ is a dimensionless number that describes the ratio of heat transport due to convection induced by buoyancy changes from temperature differences, and of heat transport due to thermal diffusion. A small Rayleigh number implies that buoyancy is not strong relative to viscosity and fluid motion $\mathbf{u}$ is slow enough so that heat diffusion $\kappa\nabla T$ is the dominant heat transport term. On the other hand, a fluid with a high Rayleigh number will show vigorous convection that dominates heat conduction.

    -

    For most fluids for which we are interested in computing thermal convection, the Rayleigh number is very large, often $10^6$ or larger. From the structure of the equations, we see that this will lead to large pressure differences and large velocities. Consequently, the convection term in the convection-diffusion equation for $T$ will also be very large and an accurate solution of this equation will require us to choose small time steps. Problems with large Rayleigh numbers are therefore hard to solve numerically for similar reasons that make solving the Navier-Stokes equations hard to solve when the Reynolds number $\mathrm{Re}$ is large.

    -

    Note that a large Rayleigh number does not necessarily involve large velocities in absolute terms. For example, the Rayleigh number in the earth mantle is larger than $10^6$. Yet the velocities are small: the material is in fact solid rock but it is so hot and under pressure that it can flow very slowly, on the order of at most a few centimeters per year. Nevertheless, this can lead to mixing over time scales of many million years, a time scale much shorter than for the same amount of heat to be distributed by thermal conductivity and a time scale of relevance to affect the evolution of the earth's interior and surface structure.

    +

    These equations fall into the class of vector-valued problems (a toplevel overview of this topic can be found in the Handling vector valued problems module). Here, $\mathbf u$ is the velocity field, $p$ the pressure, and $T$ the temperature of the fluid. $\varepsilon ({\mathbf u}) = \frac 12
+[(\nabla{\mathbf u}) + (\nabla {\mathbf u})^T]$ is the symmetric gradient of the velocity. As can be seen, velocity and pressure solve a Stokes equation describing the motion of an incompressible fluid, an equation we have previously considered in step-22; we will draw extensively on the experience we have gained in that program, in particular with regard to efficient linear Stokes solvers.

    +

    The forcing term of the fluid motion is the buoyancy of the fluid, expressed as the product of the density $\rho$, the thermal expansion coefficient $\beta$, the temperature $T$ and the gravity vector $\mathbf{g}$ pointing downward. (A derivation of why the right hand side looks like it looks is given in the introduction of step-32.) While the first two equations describe how the fluid reacts to temperature differences by moving around, the third equation states how the fluid motion affects the temperature field: it is an advection diffusion equation, i.e., the temperature is attached to the fluid particles and advected along in the flow field, with an additional diffusion (heat conduction) term. In many applications, the diffusion coefficient is fairly small, and the temperature equation is in fact transport, not diffusion dominated and therefore in character more hyperbolic than elliptic; we will have to take this into account when developing a stable discretization.

    +

    In the equations above, the term $\gamma$ on the right hand side denotes the heat sources and may be a spatially and temporally varying function. $\eta$ and $\kappa$ denote the viscosity and diffusivity coefficients, which we assume constant for this tutorial program. The more general case when $\eta$ depends on the temperature is an important factor in physical applications: Most materials become more fluid as they get hotter (i.e., $\eta$ decreases with $T$); sometimes, as in the case of rock minerals at temperatures close to their melting point, $\eta$ may change by orders of magnitude over the typical range of temperatures.

    +

    We note that the Stokes equation above could be nondimensionalized by introducing the Rayleigh number $\mathrm{Ra}=\frac{\|\mathbf{g}\| \beta \rho}{\eta \kappa} \delta T L^3$ using a typical length scale $L$, typical temperature difference $\delta T$, density $\rho$, thermal diffusivity $\eta$, and thermal conductivity $\kappa$. $\mathrm{Ra}$ is a dimensionless number that describes the ratio of heat transport due to convection induced by buoyancy changes from temperature differences, and of heat transport due to thermal diffusion. A small Rayleigh number implies that buoyancy is not strong relative to viscosity and fluid motion $\mathbf{u}$ is slow enough so that heat diffusion $\kappa\nabla T$ is the dominant heat transport term. On the other hand, a fluid with a high Rayleigh number will show vigorous convection that dominates heat conduction.

    +

    For most fluids for which we are interested in computing thermal convection, the Rayleigh number is very large, often $10^6$ or larger. From the structure of the equations, we see that this will lead to large pressure differences and large velocities. Consequently, the convection term in the convection-diffusion equation for $T$ will also be very large and an accurate solution of this equation will require us to choose small time steps. Problems with large Rayleigh numbers are therefore hard to solve numerically for similar reasons that make solving the Navier-Stokes equations hard to solve when the Reynolds number $\mathrm{Re}$ is large.

    +

    Note that a large Rayleigh number does not necessarily involve large velocities in absolute terms. For example, the Rayleigh number in the earth mantle is larger than $10^6$. Yet the velocities are small: the material is in fact solid rock but it is so hot and under pressure that it can flow very slowly, on the order of at most a few centimeters per year. Nevertheless, this can lead to mixing over time scales of many million years, a time scale much shorter than for the same amount of heat to be distributed by thermal conductivity and a time scale of relevance to affect the evolution of the earth's interior and surface structure.

    Note
    If you are interested in using the program as the basis for your own experiments, you will also want to take a look at its continuation in step-32. Furthermore, step-32 later was developed into the much larger open source code ASPECT (see https://aspect.geodynamics.org/ ) that can solve realistic problems and that you may want to investigate before trying to morph step-31 into something that can solve whatever you want to solve.

    Boundary and initial conditions

    -

    Since the Boussinesq equations are derived under the assumption that inertia of the fluid's motion does not play a role, the flow field is at each time entirely determined by buoyancy difference at that time, not by the flow field at previous times. This is reflected by the fact that the first two equations above are the steady state Stokes equation that do not contain a time derivative. Consequently, we do not need initial conditions for either velocities or pressure. On the other hand, the temperature field does satisfy an equation with a time derivative, so we need initial conditions for $T$.

    -

    As for boundary conditions: if $\kappa>0$ then the temperature satisfies a second order differential equation that requires boundary data all around the boundary for all times. These can either be a prescribed boundary temperature $T|_{\partial\Omega}=T_b$ (Dirichlet boundary conditions), or a prescribed thermal flux $\mathbf{n}\cdot\kappa\nabla
-T|_{\partial\Omega}=\phi$; in this program, we will use an insulated boundary condition, i.e., prescribe no thermal flux: $\phi=0$.

    -

    Similarly, the velocity field requires us to pose boundary conditions. These may be no-slip no-flux conditions $\mathbf{u}=0$ on $\partial\Omega$ if the fluid sticks to the boundary, or no normal flux conditions $\mathbf n \cdot \mathbf
-u = 0$ if the fluid can flow along but not across the boundary, or any number of other conditions that are physically reasonable. In this program, we will use no normal flux conditions.

    +

    Since the Boussinesq equations are derived under the assumption that inertia of the fluid's motion does not play a role, the flow field is at each time entirely determined by buoyancy difference at that time, not by the flow field at previous times. This is reflected by the fact that the first two equations above are the steady state Stokes equation that do not contain a time derivative. Consequently, we do not need initial conditions for either velocities or pressure. On the other hand, the temperature field does satisfy an equation with a time derivative, so we need initial conditions for $T$.

    +

    As for boundary conditions: if $\kappa>0$ then the temperature satisfies a second order differential equation that requires boundary data all around the boundary for all times. These can either be a prescribed boundary temperature $T|_{\partial\Omega}=T_b$ (Dirichlet boundary conditions), or a prescribed thermal flux $\mathbf{n}\cdot\kappa\nabla
+T|_{\partial\Omega}=\phi$; in this program, we will use an insulated boundary condition, i.e., prescribe no thermal flux: $\phi=0$.

    +

    Similarly, the velocity field requires us to pose boundary conditions. These may be no-slip no-flux conditions $\mathbf{u}=0$ on $\partial\Omega$ if the fluid sticks to the boundary, or no normal flux conditions $\mathbf n \cdot \mathbf
+u = 0$ if the fluid can flow along but not across the boundary, or any number of other conditions that are physically reasonable. In this program, we will use no normal flux conditions.

    Solution approach

    -

    Like the equations solved in step-21, we here have a system of differential-algebraic equations (DAE): with respect to the time variable, only the temperature equation is a differential equation whereas the Stokes system for $\mathbf{u}$ and $p$ has no time-derivatives and is therefore of the sort of an algebraic constraint that has to hold at each time instant. The main difference to step-21 is that the algebraic constraint there was a mixed Laplace system of the form

    -\begin{eqnarray*}
+<p>Like the equations solved in <a class=step-21, we here have a system of differential-algebraic equations (DAE): with respect to the time variable, only the temperature equation is a differential equation whereas the Stokes system for $\mathbf{u}$ and $p$ has no time-derivatives and is therefore of the sort of an algebraic constraint that has to hold at each time instant. The main difference to step-21 is that the algebraic constraint there was a mixed Laplace system of the form

    +\begin{eqnarray*}
   \mathbf u + {\mathbf K}\lambda \nabla p &=& 0, \\
   \nabla\cdot \mathbf u &=& f,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3960.png"/>

    where now we have a Stokes system

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=& f, \\
   \nabla\cdot \mathbf u &=& 0,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3961.png"/>

    -

    where $\nabla \cdot \eta \varepsilon (\cdot)$ is an operator similar to the Laplacian $\Delta$ applied to a vector field.

    +

    where $\nabla \cdot \eta \varepsilon (\cdot)$ is an operator similar to the Laplacian $\Delta$ applied to a vector field.

    Given the similarity to what we have done in step-21, it may not come as a surprise that we choose a similar approach, although we will have to make adjustments for the change in operator in the top-left corner of the differential operator.

    Time stepping

    -

    The structure of the problem as a DAE allows us to use the same strategy as we have already used in step-21, i.e., we use a time lag scheme: we first solve the temperature equation (using an extrapolated velocity field), and then insert the new temperature solution into the right hand side of the velocity equation. The way we implement this in our code looks at things from a slightly different perspective, though. We first solve the Stokes equations for velocity and pressure using the temperature field from the previous time step, which means that we get the velocity for the previous time step. In other words, we first solve the Stokes system for time step $n - 1$ as

    -\begin{eqnarray*}
+<p>The structure of the problem as a DAE allows us to use the same strategy as we have already used in <a class=step-21, i.e., we use a time lag scheme: we first solve the temperature equation (using an extrapolated velocity field), and then insert the new temperature solution into the right hand side of the velocity equation. The way we implement this in our code looks at things from a slightly different perspective, though. We first solve the Stokes equations for velocity and pressure using the temperature field from the previous time step, which means that we get the velocity for the previous time step. In other words, we first solve the Stokes system for time step $n - 1$ as

    +\begin{eqnarray*}
   -\nabla \cdot (2\eta \varepsilon ({\mathbf u}^{n-1})) + \nabla p^{n-1} &=&
   -\rho\; \beta \; T^{n-1} \mathbf{g},
   \\
   \nabla \cdot {\mathbf u}^{n-1} &=& 0,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3965.png"/>

    -

    and then the temperature equation with an extrapolated velocity field to time $n$.

    -

    In contrast to step-21, we'll use a higher order time stepping scheme here, namely the Backward Differentiation Formula scheme of order 2 (BDF-2 in short) that replaces the time derivative $\frac{\partial T}{\partial t}$ by the (one-sided) difference quotient $\frac{\frac 32 T^{n}-2T^{n-1}+\frac 12 T^{n-2}}{k}$ with $k$ the time step size. This gives the discretized-in-time temperature equation

    -\begin{eqnarray*}
+<p> and then the temperature equation with an extrapolated velocity field to time <picture><source srcset=$n$.

    +

    In contrast to step-21, we'll use a higher order time stepping scheme here, namely the Backward Differentiation Formula scheme of order 2 (BDF-2 in short) that replaces the time derivative $\frac{\partial T}{\partial t}$ by the (one-sided) difference quotient $\frac{\frac 32 T^{n}-2T^{n-1}+\frac 12 T^{n-2}}{k}$ with $k$ the time step size. This gives the discretized-in-time temperature equation

    +\begin{eqnarray*}
   \frac 32 T^n
   -
   k\nabla \cdot \kappa \nabla T^n
@@ -237,13 +237,13 @@
   k(2{\mathbf u}^{n-1} - {\mathbf u}^{n-2} ) \cdot \nabla (2T^{n-1}-T^{n-2})
   +
   k\gamma.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3968.png"/>

    -

    Note how the temperature equation is solved semi-explicitly: diffusion is treated implicitly whereas advection is treated explicitly using an extrapolation (or forward-projection) of temperature and velocity, including the just-computed velocity ${\mathbf u}^{n-1}$. The forward-projection to the current time level $n$ is derived from a Taylor expansion, $T^n
+<p> Note how the temperature equation is solved semi-explicitly: diffusion is treated implicitly whereas advection is treated explicitly using an extrapolation (or forward-projection) of temperature and velocity, including the just-computed velocity <picture><source srcset=${\mathbf u}^{n-1}$. The forward-projection to the current time level $n$ is derived from a Taylor expansion, $T^n
 \approx T^{n-1} + k_n \frac{\partial T}{\partial t} \approx T^{n-1} + k_n
-\frac{T^{n-1}-T^{n-2}}{k_n} = 2T^{n-1}-T^{n-2}$. We need this projection for maintaining the order of accuracy of the BDF-2 scheme. In other words, the temperature fields we use in the explicit right hand side are second order approximations of the current temperature field — not quite an explicit time stepping scheme, but by character not too far away either.

    -

    The introduction of the temperature extrapolation limits the time step by a Courant-Friedrichs-Lewy (CFL) condition just like it was in step-21. (We wouldn't have had that stability condition if we treated the advection term implicitly since the BDF-2 scheme is A-stable, at the price that we needed to build a new temperature matrix at each time step.) We will discuss the exact choice of time step in the results section, but for the moment of importance is that this CFL condition means that the time step size $k$ may change from time step to time step, and that we have to modify the above formula slightly. If $k_n,k_{n-1}$ are the time steps sizes of the current and previous time step, then we use the approximations

    -\begin{align*}
+\frac{T^{n-1}-T^{n-2}}{k_n} = 2T^{n-1}-T^{n-2}$. We need this projection for maintaining the order of accuracy of the BDF-2 scheme. In other words, the temperature fields we use in the explicit right hand side are second order approximations of the current temperature field — not quite an explicit time stepping scheme, but by character not too far away either.

    +

    The introduction of the temperature extrapolation limits the time step by a Courant-Friedrichs-Lewy (CFL) condition just like it was in step-21. (We wouldn't have had that stability condition if we treated the advection term implicitly since the BDF-2 scheme is A-stable, at the price that we needed to build a new temperature matrix at each time step.) We will discuss the exact choice of time step in the results section, but for the moment of importance is that this CFL condition means that the time step size $k$ may change from time step to time step, and that we have to modify the above formula slightly. If $k_n,k_{n-1}$ are the time steps sizes of the current and previous time step, then we use the approximations

    +\begin{align*}
 \frac{\partial T}{\partial t} \approx
  \frac 1{k_n}
  \left(
@@ -253,10 +253,10 @@
        +
        \frac{k_n^2}{k_{n-1}(k_n+k_{n-1})} T^{n-2}
  \right)
- \end{align*} + \end{align*}" src="form_3972.png"/>

    and

    -\begin{align*}
+<picture><source srcset=\begin{align*}
 T^n \approx
    T^{n-1} + k_n \frac{\partial T}{\partial t}
    \approx
@@ -264,10 +264,10 @@
    \frac{T^{n-1}-T^{n-2}}{k_{n-1}}
    =
    \left(1+\frac{k_n}{k_{n-1}}\right)T^{n-1}-\frac{k_n}{k_{n-1}}T^{n-2},
-\end{align*} +\end{align*}" src="form_3973.png"/>

    and above equation is generalized as follows:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} T^n
   -
   k_n\nabla \cdot \kappa \nabla T^n
@@ -279,14 +279,14 @@
   k_n{\mathbf u}^{*,n} \cdot \nabla T^{*,n}
   +
   k_n\gamma,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3974.png"/>

    -

    where ${(\cdot)}^{*,n} = \left(1+\frac{k_n}{k_{n-1}}\right)(\cdot)^{n-1} -
-\frac{k_n}{k_{n-1}}(\cdot)^{n-2}$ denotes the extrapolation of velocity $\mathbf u$ and temperature $T$ to time level $n$, using the values at the two previous time steps. That's not an easy to read equation, but will provide us with the desired higher order accuracy. As a consistency check, it is easy to verify that it reduces to the same equation as above if $k_n=k_{n-1}$.

    -

    As a final remark we note that the choice of a higher order time stepping scheme of course forces us to keep more time steps in memory; in particular, we here will need to have $T^{n-2}$ around, a vector that we could previously discard. This seems like a nuisance that we were able to avoid previously by using only a first order time stepping scheme, but as we will see below when discussing the topic of stabilization, we will need this vector anyway and so keeping it around for time discretization is essentially for free and gives us the opportunity to use a higher order scheme.

    +

    where ${(\cdot)}^{*,n} = \left(1+\frac{k_n}{k_{n-1}}\right)(\cdot)^{n-1} -
+\frac{k_n}{k_{n-1}}(\cdot)^{n-2}$ denotes the extrapolation of velocity $\mathbf u$ and temperature $T$ to time level $n$, using the values at the two previous time steps. That's not an easy to read equation, but will provide us with the desired higher order accuracy. As a consistency check, it is easy to verify that it reduces to the same equation as above if $k_n=k_{n-1}$.

    +

    As a final remark we note that the choice of a higher order time stepping scheme of course forces us to keep more time steps in memory; in particular, we here will need to have $T^{n-2}$ around, a vector that we could previously discard. This seems like a nuisance that we were able to avoid previously by using only a first order time stepping scheme, but as we will see below when discussing the topic of stabilization, we will need this vector anyway and so keeping it around for time discretization is essentially for free and gives us the opportunity to use a higher order scheme.

    Weak form and space discretization for the Stokes part

    -

    Like solving the mixed Laplace equations, solving the Stokes equations requires us to choose particular pairs of finite elements for velocities and pressure variables. Because this has already been discussed in step-22, we only cover this topic briefly: Here, we use the stable pair $Q_{p+1}^d \times Q_p, p\ge 1$. These are continuous elements, so we can form the weak form of the Stokes equation without problem by integrating by parts and substituting continuous functions by their discrete counterparts:

    -\begin{eqnarray*}
+<p>Like solving the mixed Laplace equations, solving the Stokes equations requires us to choose particular pairs of finite elements for velocities and pressure variables. Because this has already been discussed in <a class=step-22, we only cover this topic briefly: Here, we use the stable pair $Q_{p+1}^d \times Q_p, p\ge 1$. These are continuous elements, so we can form the weak form of the Stokes equation without problem by integrating by parts and substituting continuous functions by their discrete counterparts:

    +\begin{eqnarray*}
   (\nabla {\mathbf v}_h, 2\eta \varepsilon ({\mathbf u}^{n-1}_h))
   -
   (\nabla \cdot {\mathbf v}_h, p^{n-1}_h)
@@ -294,12 +294,12 @@
   -({\mathbf v}_h, \rho\; \beta \; T^{n-1}_h \mathbf{g}),
   \\
   (q_h, \nabla \cdot {\mathbf u}^{n-1}_h) &=& 0,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3979.png"/>

    -

    for all test functions $\mathbf v_h, q_h$. The first term of the first equation is considered as the inner product between tensors, i.e. $(\nabla {\mathbf v}_h, \eta \varepsilon ({\mathbf u}^{n-1}_h))_\Omega
+<p> for all test functions <picture><source srcset=$\mathbf v_h, q_h$. The first term of the first equation is considered as the inner product between tensors, i.e. $(\nabla {\mathbf v}_h, \eta \varepsilon ({\mathbf u}^{n-1}_h))_\Omega
  = \int_\Omega \sum_{i,j=1}^d [\nabla {\mathbf v}_h]_{ij}
-           \eta [\varepsilon ({\mathbf u}^{n-1}_h)]_{ij}\, dx$. Because the second tensor in this product is symmetric, the anti-symmetric component of $\nabla {\mathbf v}_h$ plays no role and it leads to the entirely same form if we use the symmetric gradient of $\mathbf v_h$ instead. Consequently, the formulation we consider and that we implement is

    -\begin{eqnarray*}
+           \eta [\varepsilon ({\mathbf u}^{n-1}_h)]_{ij}\, dx$. Because the second tensor in this product is symmetric, the anti-symmetric component of $\nabla {\mathbf v}_h$ plays no role and it leads to the entirely same form if we use the symmetric gradient of $\mathbf v_h$ instead. Consequently, the formulation we consider and that we implement is

    +\begin{eqnarray*}
   (\varepsilon({\mathbf v}_h), 2\eta \varepsilon ({\mathbf u}^{n-1}_h))
   -
   (\nabla \cdot {\mathbf v}_h, p^{n-1}_h)
@@ -307,32 +307,32 @@
   -({\mathbf v}_h, \rho\; \beta \; T^{n-1}_h \mathbf{g}),
   \\
   (q_h, \nabla \cdot {\mathbf u}^{n-1}_h) &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3984.png"/>

    This is exactly the same as what we already discussed in step-22 and there is not much more to say about this here.

    Stabilization, weak form and space discretization for the temperature equation

    The more interesting question is what to do with the temperature advection-diffusion equation. By default, not all discretizations of this equation are equally stable unless we either do something like upwinding, stabilization, or all of this. One way to achieve this is to use discontinuous elements (i.e., the FE_DGQ class that we used, for example, in the discretization of the transport equation in step-12, or in discretizing the pressure in step-20 and step-21) and to define a flux at the interface between cells that takes into account upwinding. If we had a pure advection problem this would probably be the simplest way to go. However, here we have some diffusion as well, and the discretization of the Laplace operator with discontinuous elements is cumbersome because of the significant number of additional terms that need to be integrated on each face between cells. Discontinuous elements also have the drawback that the use of numerical fluxes introduces an additional numerical diffusion that acts everywhere, whereas we would really like to minimize the effect of numerical diffusion to a minimum and only apply it where it is necessary to stabilize the scheme.

    A better alternative is therefore to add some nonlinear viscosity to the model. Essentially, what this does is to transform the temperature equation from the form

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \frac{\partial T}{\partial t}
   +
   {\mathbf u} \cdot \nabla T
   -
   \nabla \cdot \kappa \nabla T &=& \gamma
-\end{eqnarray*} +\end{eqnarray*}" src="form_3985.png"/>

    to something like

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \frac{\partial T}{\partial t}
   +
   {\mathbf u} \cdot \nabla T
   -
   \nabla \cdot (\kappa+\nu(T)) \nabla T &=& \gamma,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3986.png"/> /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_32.html differs (JavaScript source, ASCII text, with very long lines (2514)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_32.html 2023-11-25 15:26:01.893182668 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_32.html 2023-11-25 15:26:01.893182668 +0100 @@ -164,58 +164,58 @@

    In addition to these changes, we also use a slightly different preconditioner, and we will have to make a number of changes that have to do with the fact that we want to solve a realistic problem here, not a model problem. The latter, in particular, will require that we think about scaling issues as well as what all those parameters and coefficients in the equations under consideration actually mean. We will discuss first the issues that affect changes in the mathematical formulation and solver structure, then how to parallelize things, and finally the actual testcase we will consider.

    Using the "right" pressure

    In step-31, we used the following Stokes model for the velocity and pressure field:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   -\rho \; \beta \; T \mathbf{g},
   \\
   \nabla \cdot {\mathbf u} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4126.png"/>

    -

    The right hand side of the first equation appears a wee bit unmotivated. Here's how things should really be. We need the external forces that act on the fluid, which we assume are given by gravity only. In the current case, we assume that the fluid does expand slightly for the purposes of this gravity force, but not enough that we need to modify the incompressibility condition (the second equation). What this means is that for the purpose of the right hand side, we can assume that $\rho=\rho(T)$. An assumption that may not be entirely justified is that we can assume that the changes of density as a function of temperature are small, leading to an expression of the form $\rho(T) = \rho_{\text{ref}}
-[1-\beta(T-T_{\text{ref}})]$, i.e., the density equals $\rho_{\text{ref}}$ at reference temperature and decreases linearly as the temperature increases (as the material expands). The force balance equation then looks properly written like this:

    -\begin{eqnarray*}
+<p> The right hand side of the first equation appears a wee bit unmotivated. Here's how things should really be. We need the external forces that act on the fluid, which we assume are given by gravity only. In the current case, we assume that the fluid does expand slightly for the purposes of this gravity force, but not enough that we need to modify the incompressibility condition (the second equation). What this means is that for the purpose of the right hand side, we can assume that <picture><source srcset=$\rho=\rho(T)$. An assumption that may not be entirely justified is that we can assume that the changes of density as a function of temperature are small, leading to an expression of the form $\rho(T) = \rho_{\text{ref}}
+[1-\beta(T-T_{\text{ref}})]$, i.e., the density equals $\rho_{\text{ref}}$ at reference temperature and decreases linearly as the temperature increases (as the material expands). The force balance equation then looks properly written like this:

    +\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   \rho_{\text{ref}} [1-\beta(T-T_{\text{ref}})] \mathbf{g}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4130.png"/>

    -

    Now note that the gravity force results from a gravity potential as $\mathbf g=-\nabla \varphi$, so that we can re-write this as follows:

    -\begin{eqnarray*}
+<p> Now note that the gravity force results from a gravity potential as <picture><source srcset=$\mathbf g=-\nabla \varphi$, so that we can re-write this as follows:

    +\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   -\rho_{\text{ref}} \; \beta\; T\; \mathbf{g}
   -\rho_{\text{ref}} [1+\beta T_{\text{ref}}] \nabla\varphi.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4132.png"/>

    -

    The second term on the right is time independent, and so we could introduce a new "dynamic" pressure $p_{\text{dyn}}=p+\rho_{\text{ref}}
-[1+\beta T_{\text{ref}}] \varphi=p_{\text{total}}-p_{\text{static}}$ with which the Stokes equations would read:

    -\begin{eqnarray*}
+<p> The second term on the right is time independent, and so we could introduce a new $p_{\text{dyn}}=p+\rho_{\text{ref}}
+[1+\beta T_{\text{ref}}] \varphi=p_{\text{total}}-p_{\text{static}}$ with which the Stokes equations would read:

    +\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p_{\text{dyn}} &=&
   -\rho_{\text{ref}} \; \beta \; T \; \mathbf{g},
   \\
   \nabla \cdot {\mathbf u} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4134.png"/>

    This is exactly the form we used in step-31, and it was appropriate to do so because all changes in the fluid flow are only driven by the dynamic pressure that results from temperature differences. (In other words: Any contribution to the right hand side that results from taking the gradient of a scalar field have no effect on the velocity field.)

    On the other hand, we will here use the form of the Stokes equations that considers the total pressure instead:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   \rho(T)\; \mathbf{g},
   \\
   \nabla \cdot {\mathbf u} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4135.png"/>

    There are several advantages to this:

    • This way we can plot the pressure in our program in such a way that it actually shows the total pressure that includes the effects of temperature differences as well as the static pressure of the overlying rocks. Since the pressure does not appear any further in any of the other equations, whether to use one or the other is more a matter of taste than of correctness. The flow field is exactly the same, but we get a pressure that we can now compare with values that are given in geophysical books as those that hold at the bottom of the earth mantle, for example.
    • If we wanted to make the model even more realistic, we would have to take into account that many of the material parameters (e.g. the viscosity, the density, etc) not only depend on the temperature but also the total pressure.
    • -
    • The model above assumed a linear dependence $\rho(T) = \rho_{\text{ref}}
-  [1-\beta(T-T_{\text{ref}})]$ and assumed that $\beta$ is small. In practice, this may not be so. In fact, realistic models are certainly not linear, and $\beta$ may also not be small for at least part of the temperature range because the density's behavior is substantially dependent not only on thermal expansion but by phase changes.
    • +
    • The model above assumed a linear dependence $\rho(T) = \rho_{\text{ref}}
+  [1-\beta(T-T_{\text{ref}})]$ and assumed that $\beta$ is small. In practice, this may not be so. In fact, realistic models are certainly not linear, and $\beta$ may also not be small for at least part of the temperature range because the density's behavior is substantially dependent not only on thermal expansion but by phase changes.
    • A final reason to do this is discussed in the results section and concerns possible extensions to the model we use here. It has to do with the fact that the temperature equation (see below) we use here does not include a term that contains the pressure. It should, however: rock, like gas, heats up as you compress it. Consequently, material that rises up cools adiabatically, and cold material that sinks down heats adiabatically. We discuss this further below.
    -
    Note
    There is, however, a downside to this procedure. In the earth, the dynamic pressure is several orders of magnitude smaller than the total pressure. If we use the equations above and solve all variables to, say, 4 digits of accuracy, then we may be able to get the velocity and the total pressure right, but we will have no accuracy at all if we compute the dynamic pressure by subtracting from the total pressure the static part $p_\text{static}=\rho_{\text{ref}}
-[1+\beta T_{\text{ref}}] \varphi$. If, for example, the dynamic pressure is six orders of magnitude smaller than the static pressure, then we need to solve the overall pressure to at least seven digits of accuracy to get anything remotely accurate. That said, in practice this turns out not to be a limiting factor.
    +
    Note
    There is, however, a downside to this procedure. In the earth, the dynamic pressure is several orders of magnitude smaller than the total pressure. If we use the equations above and solve all variables to, say, 4 digits of accuracy, then we may be able to get the velocity and the total pressure right, but we will have no accuracy at all if we compute the dynamic pressure by subtracting from the total pressure the static part $p_\text{static}=\rho_{\text{ref}}
+[1+\beta T_{\text{ref}}] \varphi$. If, for example, the dynamic pressure is six orders of magnitude smaller than the static pressure, then we need to solve the overall pressure to at least seven digits of accuracy to get anything remotely accurate. That said, in practice this turns out not to be a limiting factor.

    The scaling of discretized equations

    Remember that we want to solve the following set of equations:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   \rho(T) \mathbf{g},
   \\
@@ -226,11 +226,11 @@
   {\mathbf u} \cdot \nabla T
   -
   \nabla \cdot \kappa \nabla T &=& \gamma,
-\end{eqnarray*} +\end{eqnarray*}" src="form_4138.png"/>

    augmented by appropriate boundary and initial conditions. As discussed in step-31, we will solve this set of equations by solving for a Stokes problem first in each time step, and then moving the temperature equation forward by one time interval.

    The problem under consideration in this current section is with the Stokes problem: if we discretize it as usual, we get a linear system

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   M \; X
   =
   \left(\begin{array}{cc}
@@ -245,10 +245,10 @@
   \end{array}\right)
   =
   F
-\end{eqnarray*} +\end{eqnarray*}" src="form_4139.png"/>

    which in this program we will solve with a FGMRES solver. This solver iterates until the residual of these linear equations is below a certain tolerance, i.e., until

    -\[
+<picture><source srcset=\[
   \left\|
   \left(\begin{array}{c}
     F_U - A U^{(k)} - B P^{(k)}
@@ -257,35 +257,35 @@
   \end{array}\right)
   \right\|
   < \text{Tol}.
-\] +\]" src="form_4140.png"/>

    -

    This does not make any sense from the viewpoint of physical units: the quantities involved here have physical units so that the first part of the residual has units $\frac{\text{Pa}}{\text{m}}
-\text{m}^{\text{dim}}$ (most easily established by considering the term $(\nabla \cdot \mathbf v, p)_{\Omega}$ and considering that the pressure has units $\text{Pa}=\frac{\text{kg}}{\text{m}\;\text{s}^2}$ and the integration yields a factor of $\text{m}^{\text{dim}}$), whereas the second part of the residual has units $\frac{\text{m}^{\text{dim}}}{\text{s}}$. Taking the norm of this residual vector would yield a quantity with units $\text{m}^{\text{dim}-1} \sqrt{\left(\text{Pa}\right)^2 +
-       \left(\frac{\text{m}}{\text{s}}\right)^2}$. This, quite obviously, does not make sense, and we should not be surprised that doing so is eventually going to come back hurting us.

    -

    So why is this an issue here, but not in step-31? The reason back there is that everything was nicely balanced: velocities were on the order of one, the pressure likewise, the viscosity was one, and the domain had a diameter of $\sqrt{2}$. As a result, while nonsensical, nothing bad happened. On the other hand, as we will explain below, things here will not be that simply scaled: $\eta$ will be around $10^{21}$, velocities on the order of $10^{-8}$, pressure around $10^8$, and the diameter of the domain is $10^7$. In other words, the order of magnitude for the first equation is going to be $\eta\text{div}\varepsilon(\mathbf u) \approx 10^{21} \frac{10^{-8}}{(10^7)^2}
-\approx 10^{-1}$, whereas the second equation will be around $\text{div}{\mathbf u}\approx \frac{10^{-8}}{10^7} \approx 10^{-15}$. Well, so what this will lead to is this: if the solver wants to make the residual small, it will almost entirely focus on the first set of equations because they are so much bigger, and ignore the divergence equation that describes mass conservation. That's exactly what happens: unless we set the tolerance to extremely small values, the resulting flow field is definitely not divergence free. As an auxiliary problem, it turns out that it is difficult to find a tolerance that always works; in practice, one often ends up with a tolerance that requires 30 or 40 iterations for most time steps, and 10,000 for some others.

    -

    So what's a numerical analyst to do in a case like this? The answer is to start at the root and first make sure that everything is mathematically consistent first. In our case, this means that if we want to solve the system of Stokes equations jointly, we have to scale them so that they all have the same physical dimensions. In our case, this means multiplying the second equation by something that has units $\frac{\text{Pa}\;\text{s}}{\text{m}}$; one choice is to multiply with $\frac{\eta}{L}$ where $L$ is a typical lengthscale in our domain (which experiments show is best chosen to be the diameter of plumes — around 10 km — rather than the diameter of the domain). Using these numbers for $\eta$ and $L$, this factor is around $10^{17}$. So, we now get this for the Stokes system:

    -\begin{eqnarray*}
+<p> This does not make any sense from the viewpoint of physical units: the quantities involved here have physical units so that the first part of the residual has units <picture><source srcset=$\frac{\text{Pa}}{\text{m}}
+\text{m}^{\text{dim}}$ (most easily established by considering the term $(\nabla \cdot \mathbf v, p)_{\Omega}$ and considering that the pressure has units $\text{Pa}=\frac{\text{kg}}{\text{m}\;\text{s}^2}$ and the integration yields a factor of $\text{m}^{\text{dim}}$), whereas the second part of the residual has units $\frac{\text{m}^{\text{dim}}}{\text{s}}$. Taking the norm of this residual vector would yield a quantity with units $\text{m}^{\text{dim}-1} \sqrt{\left(\text{Pa}\right)^2 +
+       \left(\frac{\text{m}}{\text{s}}\right)^2}$. This, quite obviously, does not make sense, and we should not be surprised that doing so is eventually going to come back hurting us.

    +

    So why is this an issue here, but not in step-31? The reason back there is that everything was nicely balanced: velocities were on the order of one, the pressure likewise, the viscosity was one, and the domain had a diameter of $\sqrt{2}$. As a result, while nonsensical, nothing bad happened. On the other hand, as we will explain below, things here will not be that simply scaled: $\eta$ will be around $10^{21}$, velocities on the order of $10^{-8}$, pressure around $10^8$, and the diameter of the domain is $10^7$. In other words, the order of magnitude for the first equation is going to be $\eta\text{div}\varepsilon(\mathbf u) \approx 10^{21} \frac{10^{-8}}{(10^7)^2}
+\approx 10^{-1}$, whereas the second equation will be around $\text{div}{\mathbf u}\approx \frac{10^{-8}}{10^7} \approx 10^{-15}$. Well, so what this will lead to is this: if the solver wants to make the residual small, it will almost entirely focus on the first set of equations because they are so much bigger, and ignore the divergence equation that describes mass conservation. That's exactly what happens: unless we set the tolerance to extremely small values, the resulting flow field is definitely not divergence free. As an auxiliary problem, it turns out that it is difficult to find a tolerance that always works; in practice, one often ends up with a tolerance that requires 30 or 40 iterations for most time steps, and 10,000 for some others.

    +

    So what's a numerical analyst to do in a case like this? The answer is to start at the root and first make sure that everything is mathematically consistent first. In our case, this means that if we want to solve the system of Stokes equations jointly, we have to scale them so that they all have the same physical dimensions. In our case, this means multiplying the second equation by something that has units $\frac{\text{Pa}\;\text{s}}{\text{m}}$; one choice is to multiply with $\frac{\eta}{L}$ where $L$ is a typical lengthscale in our domain (which experiments show is best chosen to be the diameter of plumes — around 10 km — rather than the diameter of the domain). Using these numbers for $\eta$ and $L$, this factor is around $10^{17}$. So, we now get this for the Stokes system:

    +\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   \rho(T) \; \mathbf{g},
   \\
   \frac{\eta}{L} \nabla \cdot {\mathbf u} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4156.png"/>

    -

    The trouble with this is that the result is not symmetric any more (we have $\frac{\eta}{L} \nabla \cdot$ at the bottom left, but not its transpose operator at the top right). This, however, can be cured by introducing a scaled pressure $\hat p = \frac{L}{\eta}p$, and we get the scaled equations

    -\begin{eqnarray*}
+<p> The trouble with this is that the result is not symmetric any more (we have <picture><source srcset=$\frac{\eta}{L} \nabla \cdot$ at the bottom left, but not its transpose operator at the top right). This, however, can be cured by introducing a scaled pressure $\hat p = \frac{L}{\eta}p$, and we get the scaled equations

    +\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) +
   \nabla \left(\frac{\eta}{L} \hat p\right) &=&
   \rho(T) \; \mathbf{g},
   \\
   \frac{\eta}{L} \nabla \cdot {\mathbf u} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4159.png"/>

    -

    This is now symmetric. Obviously, we can easily recover the original pressure $p$ from the scaled pressure $\hat p$ that we compute as a result of this procedure.

    -

    In the program below, we will introduce a factor EquationData::pressure_scaling that corresponds to $\frac{\eta}{L}$, and we will use this factor in the assembly of the system matrix and preconditioner. Because it is annoying and error prone, we will recover the unscaled pressure immediately following the solution of the linear system, i.e., the solution vector's pressure component will immediately be unscaled to retrieve the physical pressure. Since the solver uses the fact that we can use a good initial guess by extrapolating the previous solutions, we also have to scale the pressure immediately before solving.

    +

    This is now symmetric. Obviously, we can easily recover the original pressure $p$ from the scaled pressure $\hat p$ that we compute as a result of this procedure.

    +

    In the program below, we will introduce a factor EquationData::pressure_scaling that corresponds to $\frac{\eta}{L}$, and we will use this factor in the assembly of the system matrix and preconditioner. Because it is annoying and error prone, we will recover the unscaled pressure immediately following the solution of the linear system, i.e., the solution vector's pressure component will immediately be unscaled to retrieve the physical pressure. Since the solver uses the fact that we can use a good initial guess by extrapolating the previous solutions, we also have to scale the pressure immediately before solving.

    Changes to the Stokes preconditioner and solver

    -

    In this tutorial program, we apply a variant of the preconditioner used in step-31. That preconditioner was built to operate on the system matrix $M$ in block form such that the product matrix

    -\begin{eqnarray*}
+<p>In this tutorial program, we apply a variant of the preconditioner used in <a class=step-31. That preconditioner was built to operate on the system matrix $M$ in block form such that the product matrix

    +\begin{eqnarray*}
   P^{-1} M
   =
   \left(\begin{array}{cc}
@@ -294,24 +294,24 @@
   \left(\begin{array}{cc}
     A & B^T \\ B & 0
   \end{array}\right)
-\end{eqnarray*} +\end{eqnarray*}" src="form_4161.png"/>

    -

    is of a form that Krylov-based iterative solvers like GMRES can solve in a few iterations. We then replaced the exact inverse of $A$ by the action of an AMG preconditioner $\tilde{A}$ based on a vector Laplace matrix, approximated the Schur complement $S = B A^{-1} B^T$ by a mass matrix $M_p$ on the pressure space and wrote an InverseMatrix class for implementing the action of $M_p^{-1}\approx S^{-1}$ on vectors. In the InverseMatrix class, we used a CG solve with an incomplete Cholesky (IC) preconditioner for performing the inner solves.

    -

    An observation one can make is that we use just the action of a preconditioner for approximating the velocity inverse $A^{-1}$ (and the outer GMRES iteration takes care of the approximate character of the inverse), whereas we use a more or less exact inverse for $M_p^{-1}$, realized by a fully converged CG solve. This appears unbalanced, but there's system to this madness: almost all the effort goes into the upper left block to which we apply the AMG preconditioner, whereas even an exact inversion of the pressure mass matrix costs basically nothing. Consequently, if it helps us reduce the overall number of iterations somewhat, then this effort is well spent.

    +

    is of a form that Krylov-based iterative solvers like GMRES can solve in a few iterations. We then replaced the exact inverse of $A$ by the action of an AMG preconditioner $\tilde{A}$ based on a vector Laplace matrix, approximated the Schur complement $S = B A^{-1} B^T$ by a mass matrix $M_p$ on the pressure space and wrote an InverseMatrix class for implementing the action of $M_p^{-1}\approx S^{-1}$ on vectors. In the InverseMatrix class, we used a CG solve with an incomplete Cholesky (IC) preconditioner for performing the inner solves.

    +

    An observation one can make is that we use just the action of a preconditioner for approximating the velocity inverse $A^{-1}$ (and the outer GMRES iteration takes care of the approximate character of the inverse), whereas we use a more or less exact inverse for $M_p^{-1}$, realized by a fully converged CG solve. This appears unbalanced, but there's system to this madness: almost all the effort goes into the upper left block to which we apply the AMG preconditioner, whereas even an exact inversion of the pressure mass matrix costs basically nothing. Consequently, if it helps us reduce the overall number of iterations somewhat, then this effort is well spent.

    That said, even though the solver worked well for step-31, we have a problem here that is a bit more complicated (cells are deformed, the pressure varies by orders of magnitude, and we want to plan ahead for more complicated physics), and so we'll change a few things slightly:

    • For more complex problems, it turns out that using just a single AMG V-cycle as preconditioner is not always sufficient. The outer solver converges just fine most of the time in a reasonable number of iterations (say, less than 50) but there are the occasional time step where it suddenly takes 700 or so. What exactly is going on there is hard to determine, but the problem can be avoided by using a more accurate solver for the top left block. Consequently, we'll want to use a CG iteration to invert the top left block of the preconditioner matrix, and use the AMG as a preconditioner for the CG solver.
    • The downside of this is that, of course, the Stokes preconditioner becomes much more expensive (approximately 10 times more expensive than when we just use a single V-cycle). Our strategy then is this: let's do up to 30 GMRES iterations with just the V-cycle as a preconditioner and if that doesn't yield convergence, then take the best approximation of the Stokes solution obtained after this first round of iterations and use that as the starting guess for iterations where we use the full inner solver with a rather lenient tolerance as preconditioner. In all our experiments this leads to convergence in only a few additional iterations.
    • -
    • One thing we need to pay attention to is that when using a CG with a lenient tolerance in the preconditioner, then $y = \tilde A^{-1} r$ is no longer a linear function of $r$ (it is, of course, if we have a very stringent tolerance in our solver, or if we only apply a single V-cycle). This is a problem since now our preconditioner is no longer a linear operator; in other words, every time GMRES uses it the preconditioner looks different. The standard GMRES solver can't deal with this, leading to slow convergence or even breakdown, but the F-GMRES variant is designed to deal with exactly this kind of situation and we consequently use it.
    • +
    • One thing we need to pay attention to is that when using a CG with a lenient tolerance in the preconditioner, then $y = \tilde A^{-1} r$ is no longer a linear function of $r$ (it is, of course, if we have a very stringent tolerance in our solver, or if we only apply a single V-cycle). This is a problem since now our preconditioner is no longer a linear operator; in other words, every time GMRES uses it the preconditioner looks different. The standard GMRES solver can't deal with this, leading to slow convergence or even breakdown, but the F-GMRES variant is designed to deal with exactly this kind of situation and we consequently use it.
    • On the other hand, once we have settled on using F-GMRES we can relax the tolerance used in inverting the preconditioner for $S$. In step-31, we ran a preconditioned CG method on $\tilde S$ until the residual had been reduced by 7 orders of magnitude. Here, we can again be more lenient because we know that the outer preconditioner doesn't suffer.
    • In step-31, we used a left preconditioner in which we first invert the top left block of the preconditioner matrix, then apply the bottom left (divergence) one, and then invert the bottom right. In other words, the application of the preconditioner acts as a lower left block triangular matrix. Another option is to use a right preconditioner that here would be upper right block triangulation, i.e., we first invert the bottom right Schur complement, apply the top right (gradient) operator and then invert the elliptic top left block. To a degree, which one to choose is a matter of taste. That said, there is one significant advantage to a right preconditioner in GMRES-type solvers: the residual with which we determine whether we should stop the iteration is the true residual, not the norm of the preconditioned equations. Consequently, it is much simpler to compare it to the stopping criterion we typically use, namely the norm of the right hand side vector. In writing this code we found that the scaling issues we discussed above also made it difficult to determine suitable stopping criteria for left-preconditioned linear systems, and consequently this program uses a right preconditioner.
    • In step-31, we used an IC (incomplete Cholesky) preconditioner for the pressure mass matrix in the Schur complement preconditioner and for the solution of the temperature system. Here, we could in principle do the same, but we do choose an even simpler preconditioner, namely a Jacobi preconditioner for both systems. This is because here we target at massively parallel computations, where the decompositions for IC/ILU would have to be performed block-wise for the locally owned degrees of freedom on each processor. This means, that the preconditioner gets more like a Jacobi preconditioner anyway, so we rather start from that variant straight away. Note that we only use the Jacobi preconditioners for CG solvers with mass matrices, where they give optimal (h-independent) convergence anyway, even though they usually require about twice as many iterations as an IC preconditioner.
    -

    As a final note, let us remark that in step-31 we computed the Schur complement $S=B A^{-1} B^T$ by approximating $-\text{div}(-\eta\Delta)^{-1}\nabla \approx \frac 1{\eta} \mathbf{1}$. Now, however, we have re-scaled the $B$ and $B^T$ operators. So $S$ should now approximate $-\frac{\eta}{L}\text{div}(-\eta\Delta)^{-1}\nabla \frac{\eta}{L} \approx
-\left(\frac{\eta}{L}\right)^2 \frac 1{\eta} \mathbf{1}$. We use the discrete form of the right hand side of this as our approximation $\tilde S$ to $S$.

    +

    As a final note, let us remark that in step-31 we computed the Schur complement $S=B A^{-1} B^T$ by approximating $-\text{div}(-\eta\Delta)^{-1}\nabla \approx \frac 1{\eta} \mathbf{1}$. Now, however, we have re-scaled the $B$ and $B^T$ operators. So $S$ should now approximate $-\frac{\eta}{L}\text{div}(-\eta\Delta)^{-1}\nabla \frac{\eta}{L} \approx
+\left(\frac{\eta}{L}\right)^2 \frac 1{\eta} \mathbf{1}$. We use the discrete form of the right hand side of this as our approximation $\tilde S$ to $S$.

    Changes to the artificial viscosity stabilization

    -

    Similarly to step-31, we will use an artificial viscosity for stabilization based on a residual of the equation. As a difference to step-31, we will provide two slightly different definitions of the stabilization parameter. For $\alpha=1$, we use the same definition as in step-31:

    -\begin{eqnarray*}
+<p>Similarly to <a class=step-31, we will use an artificial viscosity for stabilization based on a residual of the equation. As a difference to step-31, we will provide two slightly different definitions of the stabilization parameter. For $\alpha=1$, we use the same definition as in step-31:

    +\begin{eqnarray*}
   \nu_\alpha(T)|_K
   =
   \nu_1(T)|_K
@@ -323,76 +323,76 @@
     1,
     \frac{\|R_1(T)\|_{L^\infty(K)}}{c(\mathbf{u},T)}
   \right\}
-\end{eqnarray*} +\end{eqnarray*}" src="form_4168.png"/>

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_33.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1500)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_33.html 2023-11-25 15:26:01.943181652 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_33.html 2023-11-25 15:26:01.943181652 +0100 @@ -163,17 +163,17 @@ While this program demonstrates the use of automatic differentiation well, it does not express the state of the art in Euler equation solvers. There are much faster and more accurate method for this equation, and you should take a look at step-67 and step-69 to see how this equation can be solved more efficiently.

    Introduction

    Euler flow

    -

    The equations that describe the movement of a compressible, inviscid gas (the so-called Euler equations of gas dynamics) are a basic system of conservation laws. In spatial dimension $d$ they read

    -\[
+<p>The equations that describe the movement of a compressible, inviscid gas (the so-called Euler equations of gas dynamics) are a basic system of conservation laws. In spatial dimension <picture><source srcset=$d$ they read

    +\[
 \partial_t \mathbf{w} + \nabla \cdot \mathbf{F}(\mathbf{w}) =
 \mathbf{G}(\mathbf w),
-\] +\]" src="form_4265.png"/>

    -

    with the solution $\mathbf{w}=(\rho v_1,\ldots,\rho v_d,\rho,
-E)^{\top}$ consisting of $\rho$ the fluid density, ${\mathbf v}=(v_1,\ldots v_d)^T$ the flow velocity (and thus $\rho\mathbf v$ being the linear momentum density), and $E$ the energy density of the gas. We interpret the equations above as $\partial_t \mathbf{w}_i + \nabla \cdot \mathbf{F}_i(\mathbf{w}) = \mathbf
-G_i(\mathbf w)$, $i=1,\ldots,dim+2$.

    -

    For the Euler equations, the flux matrix $\mathbf F$ (or system of flux functions) is defined as (shown here for the case $d=3$)

    -\begin{eqnarray*}
+<p> with the solution <picture><source srcset=$\mathbf{w}=(\rho v_1,\ldots,\rho v_d,\rho,
+E)^{\top}$ consisting of $\rho$ the fluid density, ${\mathbf v}=(v_1,\ldots v_d)^T$ the flow velocity (and thus $\rho\mathbf v$ being the linear momentum density), and $E$ the energy density of the gas. We interpret the equations above as $\partial_t \mathbf{w}_i + \nabla \cdot \mathbf{F}_i(\mathbf{w}) = \mathbf
+G_i(\mathbf w)$, $i=1,\ldots,dim+2$.

    +

    For the Euler equations, the flux matrix $\mathbf F$ (or system of flux functions) is defined as (shown here for the case $d=3$)

    +\begin{eqnarray*}
   \mathbf F(\mathbf w)
   =
   \left(
@@ -185,10 +185,10 @@
     (E+p) v_1 & (E+p) v_2 & (E+p) v_3
   \end{array}
   \right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_4272.png"/>

    and we will choose as particular right hand side forcing only the effects of gravity, described by

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \mathbf G(\mathbf w)
   =
   \left(
@@ -200,43 +200,43 @@
     \rho \mathbf g \cdot \mathbf v
   \end{array}
   \right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_4273.png"/>

    -

    where $\mathbf g=(g_1,g_2,g_3)^T$ denotes the gravity vector. With this, the entire system of equations reads:

    -\begin{eqnarray*}
+<p> where <picture><source srcset=$\mathbf g=(g_1,g_2,g_3)^T$ denotes the gravity vector. With this, the entire system of equations reads:

    +\begin{eqnarray*}
   \partial_t (\rho v_i) + \sum_{s=1}^d \frac{\partial(\rho v_i v_s +
   \delta_{is} p)}{\partial x_s} &=& g_i \rho, \qquad i=1,\dots,d, \\
   \partial_t \rho + \sum_{s=1}^d \frac{\partial(\rho v_s)}{\partial x_s} &=& 0,  \\
   \partial_t E + \sum_{s=1}^d \frac{\partial((E+p)v_s)}{\partial x_s} &=&
   \rho \mathbf g \cdot \mathbf v.
-\end{eqnarray*} +\end{eqnarray*}" src="form_4275.png"/>

    -

    These equations describe, respectively, the conservation of momentum, mass, and energy. The system is closed by a relation that defines the pressure: $p =
-(\gamma -1)(E-\frac{1}{2} \rho |\mathbf v|^2)$. For the constituents of air (mainly nitrogen and oxygen) and other diatomic gases, the ratio of specific heats is $\gamma=1.4$.

    +

    These equations describe, respectively, the conservation of momentum, mass, and energy. The system is closed by a relation that defines the pressure: $p =
+(\gamma -1)(E-\frac{1}{2} \rho |\mathbf v|^2)$. For the constituents of air (mainly nitrogen and oxygen) and other diatomic gases, the ratio of specific heats is $\gamma=1.4$.

    This problem obviously falls into the class of vector-valued problems. A general overview of how to deal with these problems in deal.II can be found in the Handling vector valued problems module.

    Discretization

    -

    Discretization happens in the usual way, taking into account that this is a hyperbolic problem in the same style as the simple one discussed in step-12: We choose a finite element space $V_h$, and integrate our conservation law against our (vector-valued) test function $\mathbf{z} \in V_h$. We then integrate by parts and approximate the boundary flux with a numerical flux $\mathbf{H}$,

    -\begin{eqnarray*}
+<p>Discretization happens in the usual way, taking into account that this is a hyperbolic problem in the same style as the simple one discussed in <a class=step-12: We choose a finite element space $V_h$, and integrate our conservation law against our (vector-valued) test function $\mathbf{z} \in V_h$. We then integrate by parts and approximate the boundary flux with a numerical flux $\mathbf{H}$,

    +\begin{eqnarray*}
 &&\int_{\Omega} (\partial_t \mathbf{w}, \mathbf{z}) + (\nabla \cdot \mathbf{F}(\mathbf{w}), \mathbf{z}) \\
 &\approx &\int_{\Omega} (\partial_t \mathbf{w}, \mathbf{z}) - (\mathbf{F}(\mathbf{w}), \nabla \mathbf{z}) + h^{\eta}(\nabla \mathbf{w} , \nabla \mathbf{z}) + \int_{\partial \Omega} (\mathbf{H}(\mathbf{w}^+, \mathbf{w}^-, \mathbf{n}), \mathbf{z}^+),
-\end{eqnarray*} +\end{eqnarray*}" src="form_4280.png"/>

    -

    where a superscript $+$ denotes the interior trace of a function, and $-$ represents the outer trace. The diffusion term $h^{\eta}(\nabla \mathbf{w} , \nabla \mathbf{z})$ is introduced strictly for stability, where $h$ is the mesh size and $\eta$ is a parameter prescribing how much diffusion to add.

    -

    On the boundary, we have to say what the outer trace $\mathbf{w}^-$ is. Depending on the boundary condition, we prescribe either of the following:

      +

      where a superscript $+$ denotes the interior trace of a function, and $-$ represents the outer trace. The diffusion term $h^{\eta}(\nabla \mathbf{w} , \nabla \mathbf{z})$ is introduced strictly for stability, where $h$ is the mesh size and $\eta$ is a parameter prescribing how much diffusion to add.

      +

      On the boundary, we have to say what the outer trace $\mathbf{w}^-$ is. Depending on the boundary condition, we prescribe either of the following:

      • -Inflow boundary: $\mathbf{w}^-$ is prescribed to be the desired value.
      • +Inflow boundary: $\mathbf{w}^-$ is prescribed to be the desired value.
      • -Supersonic outflow boundary: $\mathbf{w}^- = \mathbf{w}^+$
      • +Supersonic outflow boundary: $\mathbf{w}^- = \mathbf{w}^+$
      • -Subsonic outflow boundary: $\mathbf{w}^- = \mathbf{w}^+$ except that the energy variable is modified to support a prescribed pressure $p_o$, i.e. $\mathbf{w}^- =(\rho^+, \rho v_1^+, \dots, \rho v_d^+, p_o/(\gamma -1) + 0.5 \rho |\mathbf{v}^+|^2)$
      • +Subsonic outflow boundary: $\mathbf{w}^- = \mathbf{w}^+$ except that the energy variable is modified to support a prescribed pressure $p_o$, i.e. $\mathbf{w}^- =(\rho^+, \rho v_1^+, \dots, \rho v_d^+, p_o/(\gamma -1) + 0.5 \rho |\mathbf{v}^+|^2)$
      • -Reflective boundary: we set $\mathbf{w}^-$ so that $(\mathbf{v}^+ + \mathbf{v}^-) \cdot \mathbf{n} = 0$ and $\rho^- = \rho^+,E^-=E^+$.
      • +Reflective boundary: we set $\mathbf{w}^-$ so that $(\mathbf{v}^+ + \mathbf{v}^-) \cdot \mathbf{n} = 0$ and $\rho^- = \rho^+,E^-=E^+$.

      More information on these issues can be found, for example, in Ralf Hartmann's PhD thesis ("Adaptive Finite Element Methods for the Compressible Euler Equations", PhD thesis, University of Heidelberg, 2002).

      -

      We use a time stepping scheme to substitute the time derivative in the above equations. For simplicity, we define $ \mathbf{B}({\mathbf{w}_{n}})(\mathbf z) $ as the spatial residual at time step $n$ :

      +

      We use a time stepping scheme to substitute the time derivative in the above equations. For simplicity, we define $ \mathbf{B}({\mathbf{w}_{n}})(\mathbf z) $ as the spatial residual at time step $n$ :

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
  \mathbf{B}(\mathbf{w}_{n})(\mathbf z)  &=&
 - \int_{\Omega} \left(\mathbf{F}(\mathbf{w}_n),
 \nabla\mathbf{z}\right) +  h^{\eta}(\nabla \mathbf{w}_n , \nabla \mathbf{z}) \\
@@ -246,43 +246,43 @@
 -
 \int_{\Omega} \left(\mathbf{G}(\mathbf{w}_n),
 \mathbf{z}\right) .
-\end{eqnarray*} +\end{eqnarray*}" src="form_4291.png"/>

      -

      At each time step, our full discretization is thus that the residual applied to any test function $\mathbf z$ equals zero:

      -\begin{eqnarray*}
+<p>At each time step, our full discretization is thus that the residual applied to any test function <picture><source srcset=$\mathbf z$ equals zero:

      +\begin{eqnarray*}
 R(\mathbf{W}_{n+1})(\mathbf z) &=&
 \int_{\Omega} \left(\frac{{\mathbf w}_{n+1} - \mathbf{w}_n}{\delta t},
 \mathbf{z}\right)+
 \theta \mathbf{B}({\mathbf{w}}_{n+1}) +  (1-\theta) \mathbf{B}({\mathbf w}_{n}) \\
 &=& 0
-\end{eqnarray*} +\end{eqnarray*}" src="form_4293.png"/>

      -

      where $ \theta \in [0,1] $ and $\mathbf{w}_i = \sum_k \mathbf{W}_i^k \mathbf{\phi}_k$. Choosing $\theta=0$ results in the explicit (forward) Euler scheme, $\theta=1$ in the stable implicit (backward) Euler scheme, and $\theta=\frac 12$ in the Crank-Nicolson scheme.

      -

      In the implementation below, we choose the Lax-Friedrichs flux for the function $\mathbf H$, i.e. $\mathbf{H}(\mathbf{a},\mathbf{b},\mathbf{n}) =
+<p> where <picture><source srcset=$ \theta \in [0,1] $ and $\mathbf{w}_i = \sum_k \mathbf{W}_i^k \mathbf{\phi}_k$. Choosing $\theta=0$ results in the explicit (forward) Euler scheme, $\theta=1$ in the stable implicit (backward) Euler scheme, and $\theta=\frac 12$ in the Crank-Nicolson scheme.

      +

      In the implementation below, we choose the Lax-Friedrichs flux for the function $\mathbf H$, i.e. $\mathbf{H}(\mathbf{a},\mathbf{b},\mathbf{n}) =
 \frac{1}{2}(\mathbf{F}(\mathbf{a})\cdot \mathbf{n} +
-\mathbf{F}(\mathbf{b})\cdot \mathbf{n} + \alpha (\mathbf{a} - \mathbf{b}))$, where $\alpha$ is either a fixed number specified in the input file, or where $\alpha$ is a mesh dependent value. In the latter case, it is chosen as $\frac{h}{2\delta T}$ with $h$ the diameter of the face to which the flux is applied, and $\delta T$ the current time step.

      -

      With these choices, equating the residual to zero results in a nonlinear system of equations $R(\mathbf{W}_{n+1})=0$. We solve this nonlinear system by a Newton iteration (in the same way as explained in step-15), i.e. by iterating

      -\begin{eqnarray*}
+\mathbf{F}(\mathbf{b})\cdot \mathbf{n} + \alpha (\mathbf{a} - \mathbf{b}))$, where $\alpha$ is either a fixed number specified in the input file, or where $\alpha$ is a mesh dependent value. In the latter case, it is chosen as $\frac{h}{2\delta T}$ with $h$ the diameter of the face to which the flux is applied, and $\delta T$ the current time step.

      +

      With these choices, equating the residual to zero results in a nonlinear system of equations $R(\mathbf{W}_{n+1})=0$. We solve this nonlinear system by a Newton iteration (in the same way as explained in step-15), i.e. by iterating

      +\begin{eqnarray*}
 R'(\mathbf{W}^k_{n+1},\delta \mathbf{W}_{n+1}^k)(\mathbf z) & = & -
 R(\mathbf{W}^{k}_{n+1})(\mathbf z) \qquad \qquad \forall \mathbf z\in V_h \\
 \mathbf{W}^{k+1}_{n+1} &=& \mathbf{W}^k_{n+1} + \delta \mathbf{W}^k_{n+1},
-\end{eqnarray*} +\end{eqnarray*}" src="form_4300.png"/>

      -

      until $|R(\mathbf{W}^k_{n+1})|$ (the residual) is sufficiently small. By testing with the nodal basis of a finite element space instead of all $\mathbf z$, we arrive at a linear system for $\delta \mathbf W$:

      -\begin{eqnarray*}
+<p> until <picture><source srcset=$|R(\mathbf{W}^k_{n+1})|$ (the residual) is sufficiently small. By testing with the nodal basis of a finite element space instead of all $\mathbf z$, we arrive at a linear system for $\delta \mathbf W$:

      +\begin{eqnarray*}
 \mathbf R'(\mathbf{W}^k_{n+1})\delta \mathbf{W}^k_{n+1} & = & -
 \mathbf R(\mathbf{W}^{k}_{n+1}).
-\end{eqnarray*} +\end{eqnarray*}" src="form_4303.png"/>

      This linear system is, in general, neither symmetric nor has any particular definiteness properties. We will either use a direct solver or Trilinos' GMRES implementation to solve it. As will become apparent from the results shown below, this fully implicit iteration converges very rapidly (typically in 3 steps) and with the quadratic convergence order expected from a Newton method.

      Automatic differentiation

      -

      Since computing the Jacobian matrix $\mathbf R'(\mathbf W^k)$ is a terrible beast, we use an automatic differentiation package, Sacado, to do this. Sacado is a package within the Trilinos framework and offers a C++ template class Sacado::Fad::DFad (Fad standing for "forward automatic +

      Since computing the Jacobian matrix $\mathbf R'(\mathbf W^k)$ is a terrible beast, we use an automatic differentiation package, Sacado, to do this. Sacado is a package within the Trilinos framework and offers a C++ template class Sacado::Fad::DFad (Fad standing for "forward automatic differentiation") that supports basic arithmetic operators and functions such as sqrt, sin, cos, pow, etc. In order to use this feature, one declares a collection of variables of this type and then denotes some of this collection as degrees of freedom, the rest of the variables being functions of the independent variables. These variables are used in an algorithm, and as the variables are used, their sensitivities with respect to the degrees of freedom are continuously updated.

      -

      One can imagine that for the full Jacobian matrix as a whole, this could be prohibitively expensive: the number of independent variables are the $\mathbf W^k$, the dependent variables the elements of the vector $\mathbf
-R(\mathbf W^k)$. Both of these vectors can easily have tens of thousands of elements or more. However, it is important to note that not all elements of $\mathbf R$ depend on all elements of $\mathbf W^k$: in fact, an entry in $\mathbf R$ only depends on an element of $\mathbf W^k$ if the two corresponding shape functions overlap and couple in the weak form.

      -

      Specifically, it is wise to define a minimum set of independent AD variables that the residual on the current cell may possibly depend on: on every element, we define those variables as independent that correspond to the degrees of freedom defined on this cell (or, if we have to compute jump terms between cells, that correspond to degrees of freedom defined on either of the two adjacent cells), and the dependent variables are the elements of the local residual vector. Not doing this, i.e. defining all elements of $\mathbf W^k$ as independent, will result a very expensive computation of a lot of zeros: the elements of the local residual vector are independent of almost all elements of the solution vector, and consequently their derivatives are zero; however, trying to compute these zeros can easily take 90% or more of the compute time of the entire program, as shown in an experiment inadvertently made by a student a few years after this program was first written.

      -

      Coming back to the question of computing the Jacobian automatically: The author has used this approach side by side with a hand coded Jacobian for the incompressible Navier-Stokes problem and found the Sacado approach to be just as fast as using a hand coded Jacobian, but infinitely simpler and less error prone: Since using the auto-differentiation requires only that one code the residual $R(\mathbf{W})$, ensuring code correctness and maintaining code becomes tremendously more simple – the Jacobian matrix $\mathbf R'$ is computed by essentially the same code that also computes the residual $\mathbf
-R$.

      +

      One can imagine that for the full Jacobian matrix as a whole, this could be prohibitively expensive: the number of independent variables are the $\mathbf W^k$, the dependent variables the elements of the vector $\mathbf
+R(\mathbf W^k)$. Both of these vectors can easily have tens of thousands of elements or more. However, it is important to note that not all elements of $\mathbf R$ depend on all elements of $\mathbf W^k$: in fact, an entry in $\mathbf R$ only depends on an element of $\mathbf W^k$ if the two corresponding shape functions overlap and couple in the weak form.

      +

      Specifically, it is wise to define a minimum set of independent AD variables that the residual on the current cell may possibly depend on: on every element, we define those variables as independent that correspond to the degrees of freedom defined on this cell (or, if we have to compute jump terms between cells, that correspond to degrees of freedom defined on either of the two adjacent cells), and the dependent variables are the elements of the local residual vector. Not doing this, i.e. defining all elements of $\mathbf W^k$ as independent, will result a very expensive computation of a lot of zeros: the elements of the local residual vector are independent of almost all elements of the solution vector, and consequently their derivatives are zero; however, trying to compute these zeros can easily take 90% or more of the compute time of the entire program, as shown in an experiment inadvertently made by a student a few years after this program was first written.

      +

      Coming back to the question of computing the Jacobian automatically: The author has used this approach side by side with a hand coded Jacobian for the incompressible Navier-Stokes problem and found the Sacado approach to be just as fast as using a hand coded Jacobian, but infinitely simpler and less error prone: Since using the auto-differentiation requires only that one code the residual $R(\mathbf{W})$, ensuring code correctness and maintaining code becomes tremendously more simple – the Jacobian matrix $\mathbf R'$ is computed by essentially the same code that also computes the residual $\mathbf
+R$.

      All this said, here's a very simple example showing how Sacado can be used:

      #href_anchor"line">#include <iostream>
      @@ -305,8 +305,8 @@
      std::cout << "dc/da = " << derivs[0] << ", dc/db=" << derivs[1] << std::endl;
      }
      -

      The output are the derivatives $\frac{\partial c(a,b)}{\partial a},
-\frac{\partial c(a,b)}{\partial b}$ of $c(a,b)=2a+\cos(ab)$ at $a=1,b=2$.

      +

    The output are the derivatives $\frac{\partial c(a,b)}{\partial a},
+\frac{\partial c(a,b)}{\partial b}$ of $c(a,b)=2a+\cos(ab)$ at $a=1,b=2$.

    It should be noted that Sacado provides more auto-differentiation capabilities than the small subset used in this program. However, understanding the example above is enough to understand the use of Sacado in this Euler flow program.

    Trilinos solvers

    The program uses either the Aztec iterative solvers, or the Amesos sparse direct solver, both provided by the Trilinos package. This package is inherently designed to be used in a parallel program, however, it may be used in serial just as easily, as is done here. The Epetra package is the basic vector/matrix library upon which the solvers are built. This very powerful package can be used to describe the parallel distribution of a vector, and to define sparse matrices that operate on these vectors. Please view the commented code for more details on how these solvers are used within the example.

    @@ -323,8 +323,8 @@

    Implementation

    The implementation of this program is split into three essential parts:

    • -

      The EulerEquations class that encapsulates everything that completely describes the specifics of the Euler equations. This includes the flux matrix $\mathbf F(\mathbf W)$, the numerical flux $\mathbf F(\mathbf
-  W^+,\mathbf W^-,\mathbf n)$, the right hand side $\mathbf G(\mathbf W)$, boundary conditions, refinement indicators, postprocessing the output, and similar things that require knowledge of the meaning of the individual components of the solution vectors and the equations.

      +

      The EulerEquations class that encapsulates everything that completely describes the specifics of the Euler equations. This includes the flux matrix $\mathbf F(\mathbf W)$, the numerical flux $\mathbf F(\mathbf
+  W^+,\mathbf W^-,\mathbf n)$, the right hand side $\mathbf G(\mathbf W)$, boundary conditions, refinement indicators, postprocessing the output, and similar things that require knowledge of the meaning of the individual components of the solution vectors and the equations.

    • @@ -433,12 +433,12 @@

    Transformations between variables

    -

    Next, we define the gas constant. We will set it to 1.4 in its definition immediately following the declaration of this class (unlike integer variables, like the ones above, static const floating point member variables cannot be initialized within the class declaration in C++). This value of 1.4 is representative of a gas that consists of molecules composed of two atoms, such as air which consists up to small traces almost entirely of $N_2$ and $O_2$.

    +

    Next, we define the gas constant. We will set it to 1.4 in its definition immediately following the declaration of this class (unlike integer variables, like the ones above, static const floating point member variables cannot be initialized within the class declaration in C++). This value of 1.4 is representative of a gas that consists of molecules composed of two atoms, such as air which consists up to small traces almost entirely of $N_2$ and $O_2$.

      static const double gas_gamma;
     
     
    -

    In the following, we will need to compute the kinetic energy and the pressure from a vector of conserved variables. This we can do based on the energy density and the kinetic energy $\frac 12 \rho |\mathbf v|^2
-   = \frac{|\rho \mathbf v|^2}{2\rho}$ (note that the independent variables contain the momentum components $\rho v_i$, not the velocities $v_i$).

    +

    In the following, we will need to compute the kinetic energy and the pressure from a vector of conserved variables. This we can do based on the energy density and the kinetic energy $\frac 12 \rho |\mathbf v|^2
+   = \frac{|\rho \mathbf v|^2}{2\rho}$ (note that the independent variables contain the momentum components $\rho v_i$, not the velocities $v_i$).

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_34.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (2381)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_34.html 2023-11-25 15:26:01.979847571 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_34.html 2023-11-25 15:26:01.979847571 +0100 @@ -137,7 +137,7 @@

    Irrotational flow

    The incompressible motion of an inviscid fluid past a body (for example air past an airplane wing, or air or water past a propeller) is usually modeled by the Euler equations of fluid dynamics:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \frac{\partial }{\partial t}\mathbf{v} + (\mathbf{v}\cdot\nabla)\mathbf{v}
   &=
   -\frac{1}{\rho}\nabla p + \mathbf{g}
@@ -145,12 +145,12 @@
   \\
   \nabla \cdot \mathbf{v}&=0
   &\text{in } \mathbb{R}^n\backslash\Omega
-\end{align*} +\end{align*}" src="form_4379.png"/>

    -

    where the fluid density $\rho$ and the acceleration $\mathbf{g}$ due to external forces are given and the velocity $\mathbf{v}$ and the pressure $p$ are the unknowns. Here $\Omega$ is a closed bounded region representing the body around which the fluid moves.

    +

    where the fluid density $\rho$ and the acceleration $\mathbf{g}$ due to external forces are given and the velocity $\mathbf{v}$ and the pressure $p$ are the unknowns. Here $\Omega$ is a closed bounded region representing the body around which the fluid moves.

    The above equations can be derived from Navier-Stokes equations assuming that the effects due to viscosity are negligible compared to those due to the pressure gradient, inertial forces and the external forces. This is the opposite case of the Stokes equations discussed in step-22 which are the limit case of dominant viscosity, i.e. where the velocity is so small that inertia forces can be neglected. On the other hand, owing to the assumed incompressibility, the equations are not suited for very high speed gas flows where compressibility and the equation of state of the gas have to be taken into account, leading to the Euler equations of gas dynamics, a hyperbolic system.

    For the purpose of this tutorial program, we will consider only stationary flow without external forces:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   (\mathbf{v}\cdot\nabla)\mathbf{v}
   &=
   -\frac{1}{\rho}\nabla p
@@ -158,159 +158,159 @@
   \\
   \nabla \cdot \mathbf{v}&=0
   &\text{in } \mathbb{R}^n\backslash\Omega
-\end{align*} +\end{align*}" src="form_4380.png"/>

    Uniqueness of the solution of the Euler equations is ensured by adding the boundary conditions

    -\[
+<picture><source srcset=\[
   \label{eq:boundary-conditions}
   \begin{aligned}
     \mathbf{n}\cdot\mathbf{v}& = 0 \qquad && \text{ on } \partial\Omega \\
     \mathbf{v}& = \mathbf{v}_\infty && \text{ when } |\mathbf{x}| \to \infty,
   \end{aligned}
-\] +\]" src="form_4381.png"/>

    -

    which is to say that the body is at rest in our coordinate systems and is not permeable, and that the fluid has (constant) velocity $\mathbf{v}_\infty$ at infinity. An alternative viewpoint is that our coordinate system moves along with the body whereas the background fluid is at rest at infinity. Notice that we define the normal $\mathbf{n}$ as the outer normal to the domain $\Omega$, which is the opposite of the outer normal to the integration domain.

    +

    which is to say that the body is at rest in our coordinate systems and is not permeable, and that the fluid has (constant) velocity $\mathbf{v}_\infty$ at infinity. An alternative viewpoint is that our coordinate system moves along with the body whereas the background fluid is at rest at infinity. Notice that we define the normal $\mathbf{n}$ as the outer normal to the domain $\Omega$, which is the opposite of the outer normal to the integration domain.

    For both stationary and non stationary flow, the solution process starts by solving for the velocity in the second equation and substituting in the first equation in order to find the pressure. The solution of the stationary Euler equations is typically performed in order to understand the behavior of the given (possibly complex) geometry when a prescribed motion is enforced on the system.

    -

    The first step in this process is to change the frame of reference from a coordinate system moving along with the body to one in which the body moves through a fluid that is at rest at infinity. This can be expressed by introducing a new velocity $\mathbf{\tilde{v}}=\mathbf{v}-\mathbf{v}_\infty$ for which we find that the same equations hold (because $\nabla\cdot
-\mathbf{v}_\infty=0$) and we have boundary conditions

    -\[
+<p>The first step in this process is to change the frame of reference from a coordinate system moving along with the body to one in which the body moves through a fluid that is at rest at infinity. This can be expressed by introducing a new velocity <picture><source srcset=$\mathbf{\tilde{v}}=\mathbf{v}-\mathbf{v}_\infty$ for which we find that the same equations hold (because $\nabla\cdot
+\mathbf{v}_\infty=0$) and we have boundary conditions

    +\[
   \label{eq:boundary-conditions-tilde}
   \begin{aligned}
     \mathbf{n}\cdot\mathbf{\tilde{v}}& = -\mathbf{n}\cdot\mathbf{v}_\infty \qquad && \text{ on } \partial\Omega \\
     \mathbf{\tilde{v}}& = 0 && \text{ when } |\mathbf{x}| \to \infty,
   \end{aligned}
-\] +\]" src="form_4385.png"/>

    -

    If we assume that the fluid is irrotational, i.e., $\nabla \times
-\mathbf{v}=0$ in $\mathbb{R}^n\backslash\Omega$, we can represent the velocity, and consequently also the perturbation velocity, as the gradient of a scalar function:

    -\[
+<p>If we assume that the fluid is irrotational, i.e., <picture><source srcset=$\nabla \times
+\mathbf{v}=0$ in $\mathbb{R}^n\backslash\Omega$, we can represent the velocity, and consequently also the perturbation velocity, as the gradient of a scalar function:

    +\[
   \mathbf{\tilde{v}}=\nabla\phi,
-\] +\]" src="form_4388.png"/>

    -

    and so the second part of Euler equations above can be rewritten as the homogeneous Laplace equation for the unknown $\phi$:

    -\begin{align*}
+<p> and so the second part of Euler equations above can be rewritten as the homogeneous Laplace equation for the unknown <picture><source srcset=$\phi$:

    +\begin{align*}
 \label{laplace}
 \Delta\phi &= 0 \qquad &&\text{in}\ \mathbb{R}^n\backslash\Omega,
            \\
            \mathbf{n}\cdot\nabla\phi &= -\mathbf{n}\cdot\mathbf{v}_\infty
            && \text{on}\ \partial\Omega
-\end{align*} +\end{align*}" src="form_4389.png"/>

    -

    while the momentum equation reduces to Bernoulli's equation that expresses the pressure $p$ as a function of the potential $\phi$:

    -\[
+<p> while the momentum equation reduces to Bernoulli's equation that expresses the pressure <picture><source srcset=$p$ as a function of the potential $\phi$:

    +\[
 \frac{p}{\rho} +\frac{1}{2} | \nabla \phi |^2 = 0 \in \Omega.
-\] +\]" src="form_4390.png"/>

    So we can solve the problem by solving the Laplace equation for the potential. We recall that the following functions, called fundamental solutions of the Laplace equation,

    -\[ \begin{aligned}
+<picture><source srcset=\[ \begin{aligned}
 \label{eq:3} G(\mathbf{y}-\mathbf{x}) = &
 -\frac{1}{2\pi}\ln|\mathbf{y}-\mathbf{x}| \qquad && \text{for } n=2 \\
 G(\mathbf{y}-\mathbf{x}) = &
 \frac{1}{4\pi}\frac{1}{|\mathbf{y}-\mathbf{x}|}&& \text{for } n=3,
 \end{aligned}
-\] +\]" src="form_4391.png"/>

    satisfy in a distributional sense the equation:

    -\[
+<picture><source srcset=\[
 -\Delta_y G(\mathbf{y}-\mathbf{x}) = \delta(\mathbf{y}-\mathbf{x}),
-\] +\]" src="form_4392.png"/>

    -

    where the derivative is done in the variable $\mathbf{y}$. By using the usual Green identities, our problem can be written on the boundary $\partial\Omega = \Gamma$ only. We recall the general definition of the second Green identity:

    +

    where the derivative is done in the variable $\mathbf{y}$. By using the usual Green identities, our problem can be written on the boundary $\partial\Omega = \Gamma$ only. We recall the general definition of the second Green identity:

    -\[\label{green}
+<picture><source srcset=\[\label{green}
   \int_{\omega}
   (-\Delta u)v\,dx + \int_{\partial\omega} \frac{\partial u}{\partial \tilde{\mathbf{n}} }v \,ds
   =
   \int_{\omega}
   (-\Delta v)u\,dx + \int_{\partial\omega} u\frac{\partial v}{\partial \tilde{\mathbf{n}}} \,ds,
-\] +\]" src="form_4395.png"/>

    -

    where $\tilde{\mathbf{n}}$ is the normal to the surface of $\omega$ pointing outwards from the domain of integration $\omega$.

    -

    In our case the domain of integration is the domain $\mathbb{R}^n\backslash\Omega$, whose boundary is $ \Gamma_\infty \cup
-\Gamma$, where the "boundary" at infinity is defined as

    +

    where $\tilde{\mathbf{n}}$ is the normal to the surface of $\omega$ pointing outwards from the domain of integration $\omega$.

    +

    In our case the domain of integration is the domain $\mathbb{R}^n\backslash\Omega$, whose boundary is $ \Gamma_\infty \cup
+\Gamma$, where the "boundary" at infinity is defined as

    -\[
+<picture><source srcset=\[
 \Gamma_\infty \dealcoloneq \lim_{r\to\infty} \partial B_r(0).
-\] +\]" src="form_4398.png"/>

    -

    In our program the normals are defined as outer to the domain $\Omega$, that is, they are in fact inner to the integration domain, and some care is required in defining the various integrals with the correct signs for the normals, i.e. replacing $\tilde{\mathbf{n}}$ by $-\mathbf{n}$.

    -

    If we substitute $u$ and $v$ in the Green identity with the solution $\phi$ and with the fundamental solution of the Laplace equation respectively, as long as $\mathbf{x}$ is chosen in the region $\mathbb{R}^n\backslash\Omega$, we obtain:

    -\[
+<p>In our program the normals are defined as <em>outer</em> to the domain <picture><source srcset=$\Omega$, that is, they are in fact inner to the integration domain, and some care is required in defining the various integrals with the correct signs for the normals, i.e. replacing $\tilde{\mathbf{n}}$ by $-\mathbf{n}$.

    +

    If we substitute $u$ and $v$ in the Green identity with the solution $\phi$ and with the fundamental solution of the Laplace equation respectively, as long as $\mathbf{x}$ is chosen in the region $\mathbb{R}^n\backslash\Omega$, we obtain:

    +\[
   \phi(\mathbf{x}) -
   \int_{\Gamma\cup\Gamma_\infty}\frac{\partial G(\mathbf{y}-\mathbf{x})}{\partial \mathbf{n}_y}\phi(\mathbf{y})\,ds_y
   =
   -\int_{\Gamma\cup\Gamma_\infty}G(\mathbf{y}-\mathbf{x})\frac{\partial \phi}{\partial \mathbf{n}_y}(\mathbf{y})\,ds_y
   \qquad \forall\mathbf{x}\in \mathbb{R}^n\backslash\Omega
-\] +\]" src="form_4400.png"/>

    where the normals are now pointing inward the domain of integration.

    -

    Notice that in the above equation, we also have the integrals on the portion of the boundary at $\Gamma_\infty$. Using the boundary conditions of our problem, we have that $\nabla \phi$ is zero at infinity (which simplifies the integral on $\Gamma_\infty$ on the right hand side).

    -

    The integral on $\Gamma_\infty$ that appears on the left hand side can be treated by observing that $\nabla\phi=0$ implies that $\phi$ at infinity is necessarily constant. We define its value to be $\phi_\infty$. It is an easy exercise to prove that

    +

    Notice that in the above equation, we also have the integrals on the portion of the boundary at $\Gamma_\infty$. Using the boundary conditions of our problem, we have that $\nabla \phi$ is zero at infinity (which simplifies the integral on $\Gamma_\infty$ on the right hand side).

    +

    The integral on $\Gamma_\infty$ that appears on the left hand side can be treated by observing that $\nabla\phi=0$ implies that $\phi$ at infinity is necessarily constant. We define its value to be $\phi_\infty$. It is an easy exercise to prove that

    -\[
+<picture><source srcset=\[
 -\int_{\Gamma_\infty} \frac{\partial G(\mathbf{y}-\mathbf{x})}
 {\partial \mathbf{n}_y}\phi_\infty \,ds_y =
 \lim_{r\to\infty} \int_{\partial B_r(0)} \frac{\mathbf{r}}{r} \cdot \nabla G(\mathbf{y}-\mathbf{x})
 \phi_\infty \,ds_y = -\phi_\infty.
-\] +\]" src="form_4405.png"/>

    Using this result, we can reduce the above equation only on the boundary $\Gamma$ using the so-called Single and Double Layer Potential operators:

    -\[\label{integral}
+<picture><source srcset=\[\label{integral}
   \phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty
   -\left(S \frac{\partial \phi}{\partial n_y}\right)(\mathbf{x})
   \qquad \forall\mathbf{x}\in \mathbb{R}^n\backslash\Omega.
-\] +\]" src="form_4406.png"/>

    -

    (The name of these operators comes from the fact that they describe the electric potential in $\mathbb{R}^n$ due to a single thin sheet of charges along a surface, and due to a double sheet of charges and anti-charges along the surface, respectively.)

    -

    In our case, we know the Neumann values of $\phi$ on the boundary: $\mathbf{n}\cdot\nabla\phi = -\mathbf{n}\cdot\mathbf{v}_\infty$. Consequently,

    -\[
+<p>(The name of these operators comes from the fact that they describe the electric potential in <picture><source srcset=$\mathbb{R}^n$ due to a single thin sheet of charges along a surface, and due to a double sheet of charges and anti-charges along the surface, respectively.)

    +

    In our case, we know the Neumann values of $\phi$ on the boundary: $\mathbf{n}\cdot\nabla\phi = -\mathbf{n}\cdot\mathbf{v}_\infty$. Consequently,

    +\[
   \phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty +
    \left(S[\mathbf{n}\cdot\mathbf{v}_\infty]\right)(\mathbf{x})
    \qquad \forall\mathbf{x} \in \mathbb{R}^n\backslash\Omega.
-\] +\]" src="form_4408.png"/>

    -

    If we take the limit for $\mathbf{x}$ tending to $\Gamma$ of the above equation, using well known properties of the single and double layer operators, we obtain an equation for $\phi$ just on the boundary $\Gamma$ of $\Omega$:

    +

    If we take the limit for $\mathbf{x}$ tending to $\Gamma$ of the above equation, using well known properties of the single and double layer operators, we obtain an equation for $\phi$ just on the boundary $\Gamma$ of $\Omega$:

    -\[\label{SD}
+<picture><source srcset=\[\label{SD}
   \alpha(\mathbf{x})\phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty +
   \left(S [\mathbf{n}\cdot\mathbf{v}_\infty]\right)(\mathbf{x})
   \quad \mathbf{x}\in \partial\Omega,
-\] +\]" src="form_4409.png"/>

    /usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_35.html differs (JavaScript source, Unicode text, UTF-8 text, with very long lines (1192)) --- old//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_35.html 2023-11-25 15:26:02.016513492 +0100 +++ new//usr/share/doc/packages/dealii-openmpi4/doxygen/deal.II/step_35.html 2023-11-25 15:26:02.016513492 +0100 @@ -137,57 +137,57 @@

    Introduction

    Motivation

    The purpose of this program is to show how to effectively solve the incompressible time-dependent Navier-Stokes equations. These equations describe the flow of a viscous incompressible fluid and read

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   u_t + u \cdot \nabla u - \nu \Delta u + \nabla p = f, \\
   \nabla \cdot u = 0,
-\end{align*} +\end{align*}" src="form_4489.png"/>

    -

    where $u$ represents the velocity of the flow and $p$ the pressure. This system of equations is supplemented by the initial condition

    -\[
+<p> where <picture><source srcset=$u$ represents the velocity of the flow and $p$ the pressure. This system of equations is supplemented by the initial condition

    +\[
   u |_{t=0} = u_0,
-\] +\]" src="form_4490.png"/>

    -

    with $u_0$ sufficiently smooth and solenoidal, and suitable boundary conditions. For instance, an admissible boundary condition, is

    -\[
+<p> with <picture><source srcset=$u_0$ sufficiently smooth and solenoidal, and suitable boundary conditions. For instance, an admissible boundary condition, is

    +\[
   u|_{\partial\Omega} = u_b.
-\] +\]" src="form_4491.png"/>

    -

    It is possible to prescribe other boundary conditions as well. In the test case that we solve here the boundary is partitioned into two disjoint subsets $\partial\Omega = \Gamma_1 \cup \Gamma_2$ and we have

    -\[
+<p> It is possible to prescribe other boundary conditions as well. In the test case that we solve here the boundary is partitioned into two disjoint subsets <picture><source srcset=$\partial\Omega = \Gamma_1 \cup \Gamma_2$ and we have

    +\[
   u|_{\Gamma_1} = u_b,
-\] +\]" src="form_4493.png"/>

    and

    -\[
+<picture><source srcset=\[
  u\times n|_{\Gamma_2} = 0, \quad p|_{\Gamma_2} = 0
-\] +\]" src="form_4494.png"/>

    -

    where $n$ is the outer unit normal. The boundary conditions on $\Gamma_2$ are often used to model outflow conditions.

    +

    where $n$ is the outer unit normal. The boundary conditions on $\Gamma_2$ are often used to model outflow conditions.

    In previous tutorial programs (see for instance step-20 and step-22) we have seen how to solve the time-independent Stokes equations using a Schur complement approach. For the time-dependent case, after time discretization, we would arrive at a system like

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \frac1\tau u^k - \nu \Delta u^k + \nabla p^k = F^k, \\
   \nabla \cdot u^k = 0,
-\end{align*} +\end{align*}" src="form_4495.png"/>

    -

    where $\tau$ is the time-step. Although the structure of this system is similar to the Stokes system and thus it could be solved using a Schur complement approach, it turns out that the condition number of the Schur complement is proportional to $\tau^{-2}$. This makes the system very difficult to solve, and means that for the Navier-Stokes equations, this is not a useful avenue to the solution.

    +

    where $\tau$ is the time-step. Although the structure of this system is similar to the Stokes system and thus it could be solved using a Schur complement approach, it turns out that the condition number of the Schur complement is proportional to $\tau^{-2}$. This makes the system very difficult to solve, and means that for the Navier-Stokes equations, this is not a useful avenue to the solution.

    Projection methods

    Rather, we need to come up with a different approach to solve the time-dependent Navier-Stokes equations. The difficulty in their solution comes from the fact that the velocity and the pressure are coupled through the constraint

    -\[
+<picture><source srcset=\[
   \nabla \cdot u = 0,
-\] +\]" src="form_4497.png"/>

    for which the pressure is the Lagrange multiplier. Projection methods aim at decoupling this constraint from the diffusion (Laplace) operator.

    -

    Let us shortly describe how the projection methods look like in a semi-discrete setting. The objective is to obtain a sequence of velocities $\{u^k\}$ and pressures $\{p^k\}$. We will also obtain a sequence $\{\phi^k\}$ of auxiliary variables. Suppose that from the initial conditions, and an application of a first order method we have found $(u^0,p^0,\phi^0=0)$ and $(u^1,p^1,\phi^1=p^1-p^0)$. Then the projection method consists of the following steps: