~/f/dealii/RPMS.2017 ~/f/dealii ~/f/dealii
RPMS.2017/deal_II-devel-9.5.1-0.0.x86_64.rpm RPMS/deal_II-devel-9.5.1-0.0.x86_64.rpm differ: char 225, line 1
Comparing deal_II-devel-9.5.1-0.0.x86_64.rpm to deal_II-devel-9.5.1-0.0.x86_64.rpm
comparing the rpm tags of deal_II-devel
--- old-rpm-tags
+++ new-rpm-tags
@@ -10151 +10151 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html f69c6dae4ee6fc63a4d041c8558afe4aa3fe1423ccd1cb8cc4e86d905dd18199 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html ed3b823b49614dcaa42b138e94a630468791f74e3b8f4b3b641c79bc81eeab03 2
@@ -10154,3 +10154,3 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 3d6748d1073d70df612a7e71cb746663956b1caee6b21f8581d474c9e0de38c3 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex 5aab7209cc921476facc8fb250029b775876ecdd1b1b936ffd809c20d5422c2f 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 26ee93543b4035902c6cf9081a025ec4e355c3c61439eb3d1d47d1ae0efd35aa 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 08060bc5d2992461f8691b50d9c9109481f92a50e1c5ebabe2213aea0b8b8645 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex 453206fb2b7f14e22a41d3b99815b474ced1038854ef0208eff1228ea8d47ec3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex dbfa81338e0e3d9298857ab12eab7177222dbdbb447f521965a6b81c577b5e50 2
@@ -10322 +10322 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html d7db8fc7eccb6566eeea3ee060a2f99921977b3dcd5f8555634da3c0f93a4a06 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html f19f371027d617de067c3c3129a3975900d3d1019332ec44493549534163ecb5 2
@@ -10324 +10324 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 04de4d08f79f84d8d844311dbddad75719c6d18f751b7a799983e9b62db22a18 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html cee522617733f4282183ce64bafab9b20c65f268a28f68170d87446ad03673dd 2
@@ -10332 +10332 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 2d52690041a79bffdafb13efe07f83536e31d09826a393ff1d4cd302302fc622 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html a2ad6649d302abbc8a134f258797effc01d3ba19ec2e97c76ce8763747a03b54 2
@@ -10339 +10339 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_5_0_and_9_0_0.html 2b44c0414c2116f250e51faf190325136ae82f721ed9be7d50481d552a8d7d81 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_5_0_and_9_0_0.html 3f160529cd617e9d3195bdb26707d1933d4474803736bf23fc1c8c674d28b1a5 2
@@ -10343 +10343 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 74af42588d3ab19276c88c91f11a5e84acebb1c7048808560d3fc6aaf2126655 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html da521ff062eb567325740745c8b2a7e21edb7ae8b6ca6b88de424bf79fed0215 2
@@ -10370 +10370 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 52d676dfc0d803040e41feeea6100fb23a74fb193a2506c4b4c1a03ba8e1aa9b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 4c0bb1427f1eae15689594ba9b13fdd3a616feb437f714d3df749f544dd6548f 2
@@ -10388 +10388 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html ecca830d16d05e32856d13238f67568397c9d3d25fd3f7202ccfcb000baf3364 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html c660debc728184dfee74e2872c24a1e55e6968c0561c6d074928ce074aeee6c9 2
@@ -10405 +10405 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html a943cfe5d27f59b9af4cc9544240e1de6e4a94d11832f5e365646bc54ac2ea8b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 495c00cc68809e88ed2f05cd2c4d9ab92dca1d26ead1fb59d3d3354b9b59086a 2
@@ -10442 +10442 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html ff871ebdfa8676c7ca80b30189301b28c81d822e047d4393eb97a5f91a7bb0d9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 178557eafd0fc9a3c3ca0ff8fc29d0b9d163b378ea29bd3eeb4a8f37082a15eb 2
@@ -10445 +10445 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 5a46dabe65810f8e28178695b7bbd22fc5244c67838fe1c61a5b361e4aca4484 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html dbeb9d3a3522c2aeeaff257ed97191a13205d3708dd91d5f205740939122ffcb 2
@@ -10448 +10448 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 2e6ed7a6d2584f0deaed276ca94aecf725726e4718a06c7965364d15e4d70412 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 57851191efd241f0c13ec402c06cdf4191ff844679bdadedffe219aeaaa0b029 2
@@ -10451 +10451 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 1a7cfd17b704ee9af81aed2b691b3cb42153548b6c2db0527902d2a57c4d0396 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 057b676d7b3403066c057c28b0c2d28c8385ff9e0d2988e51cedec678c378007 2
@@ -10458 +10458 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html 817ffc94feeedaac9a0fa7c6b11efd9540ba2c37d1eb9fda549e05f760c80480 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html a29dbe391d4454f2cbda4541fca020de00bb2a55394706a9b0cf0eeb3569b5b9 2
@@ -10464 +10464 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 0b5ac9f89083d35eb62fdec2aae08474ba9636098d38d8b2367f6271eb523df0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html ec246b0fcb00ae6999f25a8eb79e2c7c9023e737199eaa5f9a2facb4549f9832 2
@@ -10470 +10470 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html e9776fa23699a2f081eef2398fe4ac6127705d51e0edc889457da43313dbe9eb 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html b3d568d6c0f6055f5f6ac071f0b6896513c0e556ec510322b909a1c155932da5 2
@@ -10475 +10475 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html c7ed98b1f79aa87ef46c9e9e3046ed43f32329cbcf47661c6966c8354d8a0d90 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html a36fc4eaa76a7556c853d0c56ffcb24f12b2dc85a38c1ab43a7370e289effa9e 2
@@ -10488 +10488 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 1469ff6d22740bf7bb9511d3cdf578cc742581226be3cd914aec15fd0d006e7c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html d14ed9025876da3479fc9830d150a3e7c8ee25cc0174ebc190d99a13b58623e4 2
@@ -10490 +10490 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 6a2211286347860425bdb487d41e564754d71c3533745bde4d061580c9da483f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 8e366f1958b960c5e20a566530a35c847a418398289063527a2f6b3dc9e23a50 2
@@ -10500 +10500 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 3600b293ff191b05b16f571f79abd22c685dd740c28813e8975f6e52764ba9ab 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html f7d6cfffc1360d5f62863144ef7fe068cfb501665b222c0fb64ec9716113c281 2
@@ -10502 +10502 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 5a817997c045270cf42ca16accb5966f256f0ca5528182a79183fcdf42ea5d22 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html e00d469c25f070b3264a084801b7bb6d7f2b5e8d380cc6fe35e5ce79a648b022 2
@@ -10506 +10506 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 8b7526f2bf5b533f3f30ba97209ada251ae487448c33bb3264e7e8a21c930727 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 31a2dd1cf2e04543b42978889ddb2da01cd8ebe3b72ee4a5ddc5a71ebbc20669 2
@@ -10519 +10519 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html 8f2ff58a3708ccbe2e16495cb8ac1ca6c635728f5b7bb621ad9eee7afccc986c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html 8fb9ea9ed1a8caeed87a712b6db9157ef665f6bc7716856cd904ba5841bf6aac 2
@@ -10521 +10521 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html a012f46861ee90806087fc7198f5e1ce54d55ec5d4cdfd8cabd9d566b33ac6aa 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html 4ffd10e0ad053209d62186a9ce0e7d3b60ff8b6d6f61576fa435f6317ba9b645 2
@@ -10525 +10525 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 06f370df7247a6493dc43d665e3527b6d27ed017ecd160d65c8adf332b70c7e3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html b8978329dde3d7957a0812f44cdb5991488373dc47cb4f55113bc4f939f7a4de 2
@@ -10528 +10528 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html 142cd0984904689b421838d422622fd7d1208621a6472ada1b793fc48b036710 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html 5b35488982b331eca3eb995b22ec9b642bef73ab895bfceeebf19c39e7ba9893 2
@@ -10538 +10538 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 0bf34d4a72bf1bbec9fa606945076da5bfd2258452c19c0701ca228a523c0cb3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 93b0552b8031fd09b7e3827de5b29316b0688ffbef89f188f792a4843cd56157 2
@@ -10541 +10541 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 08f24deae96e0541ecafb7ffbac1ca79ccb92f0a6b6d352dfb98e4f8503b3c15 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html c9f173b713311702d49db8e257e0f4f74aa2f276d874402cc6f53caa7f726ceb 2
@@ -10557 +10557 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html b0587db4db7a799311906f5758dd867062adf204734174873426ff926f7f32aa 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 21c7c90f299094f8bf7adb54e22142af5d9baab28c8e0a7b3e5969ed9724d830 2
@@ -10570 +10570 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 1fa718b9ab33c0872a0d5f1da80b077faf23e0d26a1f463e96a6231d516c0b20 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 4331a1b5aab48d7631402fbc2391e615c9fbf431bf9f88b8a88e38ee86abe144 2
@@ -10584 +10584 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html 96255500bb2fec3057aaf26e5d4582ee5b6d24bf9353a30a717fe1d133b15ac7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html a232c5c38b787241a304138c03a111a2b3afc4ea63e2709b431554a275d9dc08 2
@@ -10613 +10613 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html a5c2af8672b36ae94957c94f9f611c42bd4f6d6e52b24c5725eb785bf71fcc7c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 66e721de8b3f7c954385a63bcdcd6f302ecb7085bd80009dde1c12004097a86f 2
@@ -10618 +10618 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 29d3863a62ae3eaa892aebdbced232065f7575a43e5d4478905a002c3aad7cbf 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 3ed2a5765f4cd5de8d77a2f98f718e5fa83a45259669ae25c41e5347c58a92a6 2
@@ -10638 +10638 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 1a72c003854e552eb65ec04deecbf5508cbf5c760cdcda9a35e5b0fee7ad2ebe 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html e7473f2a2fa7002d99bd7239d26196a5369725be6ce9b04cbfd57964f86d8684 2
@@ -10640 +10640 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 6c48ca8ae65155f77a641f7ec2957934aab9ea2a408a5c0b59ede6445ba4e55b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 7ce44820b9d7e54c6e23b80e4924240c7c82e0acd0d0e04368f89928a77c7eaf 2
@@ -10642 +10642 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 9ee0139be2102ca5fc785a311955fe022d3c7a73a3dc15e4d00ce4b5b4712ed7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 3c67aa449fa60885be2f085a5b0a9e95c58e44a4414f72a4610e8f1f87426454 2
@@ -10648 +10648 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html d45e690ceef835bcf503b3ac7f3158303a3904c74823ef97f98d923899a08d7f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html d3230b8c1e235fd1ba0d574c70ca0b8b836759780aca4e1e99f94edcc7394d3f 2
@@ -10651 +10651 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 5e9ddb4f539a01d58cac9632d83ad84d7f80b8fc00c5674fe00cc63017c16e8a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 37d0b0c41af29c707c57f0b1cc2eac21c8b5bc7e3ac648bd6bb69dc3567f7fd7 2
@@ -10654 +10654 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html ad2689204029d72d9eaf53761d55fde8639dce1fc1819f2e5d86db3c8bca0b6f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html 7ef8e76c8a2d55d714811fbb0db386a6ac4d718c167a2d3936511be426013956 2
@@ -10657 +10657 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html 2600f93b27fb602f07f83ec887836ddb63f331dd3df95eb4e82a88c5baaf499f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html 9f1b596f83bfbe90fc07de6480eda66ef4b230358d689ac74821a684639bc499 2
@@ -10660 +10660 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html ae154d2b27c7e0fac3aead5c75519d05c1db2541e10ded409a1084cbd2e7005e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html e5c248b2b36bb10ba7f01941f82732ebc2b2210ea2580247dbc0f90c47a1f010 2
@@ -10663 +10663 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html 648842100f218a1aa1e79126e5256d8864e50485129b3e5902d6c13ef493c933 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html 56c1daf3f9e1c519b0cab8917787c63db79baa64a0cb55ccfcff9ba561d36963 2
@@ -10666 +10666 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html 82a0724adc8bbb8bd86c9da95efef4c175acecaa8be722f1f5949836d2a349fa 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html e38353bad36f2bb7fd9619776b2d4bdf0987917094b713c63ac1066ba5f14c45 2
@@ -10678 +10678 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 9d20482bacb96ec42cbc7a38fe22c28dada0d0e9e11317871a8ebb00c1eed61e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 303eb8f2bf80b63cef9f03b3ff2a581b41940ae03b7d75657513ca673f382ef0 2
@@ -10689 +10689 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 098871f8c2ab2820ee0a85d09bbe6ed934504409d9557d18f5b10c5b4f64cb30 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html d0213c1cc50a81dad3cd2462f7c77a8e92337a237e436061f7f7cb9b3220861b 2
@@ -10700 +10700 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 1b9d13bc03b89b2e738df6f9ac513574995a163726531c4642433d2f7a9afc23 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 8b488f09ede50b8ef4df22c0a51e302b233a32bf7207e3e9c5180d19a18e5ac8 2
@@ -10707 +10707 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html acaecc5c66978457c40d8139d4f7c32129a48c6e118371fd007690d3e9c4076e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 7dd18f015db6595207d266654129f0dd7a1664c4488693c0161495f024ca998c 2
@@ -10710 +10710 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 976580d5e9e6e5c7e9052c0afcb0ac324a1fbe3bf4ccd65e64ed4b112a1272d2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html e4875927a3fc990e1583a8ee7c35619ae076f426a592e9ddaa556993c8b15727 2
@@ -10713 +10713 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 3db4599a37eb3118f0c86b21c70d5407b56b806aa4bda244d05fa14c14dd45f3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 7b3a4f2bdc05c83245984f062319f5f10b75beb4fa52d3d2a50a49e4991eaca5 2
@@ -10721 +10721 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 7ff5ed12ac161ee362a4b8894ef73a4b866675ff4edf5099e658dc88b2a4c679 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html d883ba77c8ea259a274ba77f37d6756228b54b42e5efb1f3415315b13393fc03 2
@@ -10723 +10723 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess.html 3c1cc2c57dfa70d4b140617cf8f88c319305d207165828c3dbc7fcc910df80ec 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess.html 1af9e7b7a708be13ad15fced4a165e14b106d8e73d697fca0e311c545cf0a7fc 2
@@ -10725 +10725 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 711df23ad1a68fc27725ab7cf8a9a30cba70d321fd7d422d1012b9c515d84b3b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 5cdcde7db1ac44327158d3bf8fcb4575d0a4b18ea5421c0186a0efcc8acb52f0 2
@@ -10728 +10728 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html b6e92ff8960eeb71237015e80b0a5d018bf61a6e2318fa8fa5469549480c085c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html bc62ed4943d401c0c916b787c237195d262c72a25eb60512b1d39200397c4e5d 2
@@ -10731 +10731 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 6775ee11f0af903d226486041db5c3aab69fd3b0e25e4195652ba5aca51e824c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 02a221b63b2bd70d44ac15f43e330cc7eb231057762f061d7e771d754b64bfe3 2
@@ -10735 +10735 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html e2f41a2d9823b631d265cadbab76591921244a7c5117c0691649981390e5df05 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html e93fae5e3e044233d227b62a40811d4dff0458ec836dcbd5e685b692f1591faa 2
@@ -10738 +10738 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 69c3d70dc191a9f2eb03af37e9eba960eb92372bd6a0962d967c68be794a4fe1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 81ffa6996ae845bb9f7aa01538a5804d32706d0afe0cdbfe22ccb58d570332f2 2
@@ -10742 +10742 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 1f5a80c122700d9fd29494e587ad5586affa3cd798b3c65d4d030a4a41dfe477 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 6a57ec5a9b351dbd551905ca893aedcadc4d4dc3bebea0933a5663bdf7825ccc 2
@@ -10745 +10745 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 9aa7e4b4dbe89d2f545398d6377efa7e46a7fe36865882948fddfab699212b2e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 2770e0bc76c3ca15da013b2a2f9c83526c0af8f60df8b9d08753c844d6bea725 2
@@ -10747 +10747 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 1dc529ba3808d3ea9591c929a9847d3d17729d3b175ef9b047c75f196de88e92 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html ad31d161c592ddbda530135c4f50d46f72f5a60bc07d29a283b6b6e4462c7e1c 2
@@ -10751 +10751 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 239ea795deb1c444d93924bd94fad10eb45ca93adc15a45c8520832dccda42c0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 2bd3e8d3083817ce60cf84e7fa5787ede4bd279517c9883b67d8c70521d3a58a 2
@@ -10757 +10757 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html e02259e2341fdc50c5ee5c971c66f98386112e4c9dbad44d7c2633c5d4a846d7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html b23aab8c3e698d38eebf6029101556b930e383432fef22fc8a2471778b2cdd3c 2
@@ -10760 +10760 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html e79857c71feffb4fd9d07f52853e0a635d55a00eebfff13031fd9117a6e13ff2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html ce0effa32b0549597c5a850f726447290a565604e20f64ba44625a409a366ae0 2
@@ -10765 +10765 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 6b96269427111af19e9915f5c13decf07f0f97557fd3a25c7f238d898c2d374b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 3e76830070d605c6fe076e2454af536784a815e99024240fa47c5a89c7c4d0ab 2
@@ -10768 +10768 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 5b780a1bc044635187d68d427cd535e346d5154a0870d911bf8fa54165ef09eb 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 686a560f17bcc88bc18ecd7d257c4437644622c31e75cb9f8fe327aafa2a95da 2
@@ -10771 +10771 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 6811dac4da7471f66cb7f7a608a4692ef1169a0c3511d9b6239c27b086052276 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 5342a47b0a833c1d3fb73c76def26f48194612c3e79f54a79d4c88e762004954 2
@@ -10774 +10774 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 5f14a62c852f97e4e948f6e3b3127df18e2095c79810733b21ec312f9a47d847 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 9fd5758ed1a6598c93253c43bbb9c7441f284ef4a83c5deb75c5e49d1a6aa732 2
@@ -10786 +10786 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 5b00dc9017484b25f2268baff1d08b2363e4688481cdde11d064803742d64b60 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 65bdd368ae9975085687dd7815960cfbf4946ff3c7c89452926e227e584aa010 2
@@ -10788 +10788 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 5705c2a2d60841fbd4c9e3d6e95df804f08c316ad0c786a63e928d60765760e5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html f93d8934b9ce0b7e49c9162d15fcaa275e8cc6aac59c51f0a308442b60fbc5bf 2
@@ -10793 +10793 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html e4f7631e7c4e53d3f75d146184ca15443ece7fa6268c81b70c38c7a689f1672e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 82547322dd26ff93879f43b4550ff54aa24b926113ca49974fe8ad13677cfceb 2
@@ -10797 +10797 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html aa98bbf9caa54f4cffa54f8dff17952cbd75ff6b2a6fb0ed5c9cf2ecf4be88b2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 915495254f2f87a7f2ee61631e691107835b68f939a726e8b41bbfc5dbcc3786 2
@@ -10800 +10800 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 832278e93a5b802423b96bd2b38c14eb0fc7c23eb65aadc455299c6b73efc801 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html af923e01bf27951cc8f4f5039b0a4bc9c93d09db9115a6ec0a4b763a62d35139 2
@@ -10802 +10802 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 04df6378ad7d299289303cbc2440dae155622d7492cd6fc9021cdb8ea6f2abf9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2eaa8ea6817a27b3a84fc2ed96d586c8c5106f76abefee7a19f056c2057e882a 2
@@ -10806 +10806 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 8517cb38b9a0ef49f466bb657791bffbe9dbd88383c756a482e6c1584a84af78 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 0ea25c70a70da4a14f692896a66e776c82adedda64a3e9d553fffc66c8f11d5b 2
@@ -10812 +10812 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html fc9d0a6b574f07b10be16ba4e9d63d3b67507794883215879628d561c36a4127 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 5ed34098f1034d09ea8cfb520dc50de8a377e7fd91d8bf2ebeda9c1d8cda9dfe 2
@@ -10815 +10815 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 3976aa0acff48476eb5e047c5b440a985414b6f69f54e3b0635c86910f15ec45 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 90c114cea2666f1091df22f077cfcde7e3b37f72ff5d77c33ca4744fe8f2bc77 2
@@ -10818 +10818 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html b7f43ab5fd7b3566c153143fbbe61d5c156c336f39384b90c5081367fde1368b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html cc7281b18dbe31502da0c8398c20275284afd457c8391208948c636b30fd20fe 2
@@ -10821 +10821 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html b54f8e4bb0ee9feb0ce38fcc56083c5797d0033597eaaa74e662befa4aaa116b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 37f8a0e5396841b0b3eda2c2b6797eb857007c1179c8422ab466f63415748973 2
@@ -10824 +10824 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 1b8b1cb36ee2872eb3b3a9a530a4ee6907586a71aa1546f77c2559ce1d312b86 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html c7070fa52951602ea70a177c8f82809d04957a22e95ae7a75a2f2e3e8fad13ec 2
@@ -10827 +10827 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html dee9cf01aa3b362ef02238d7ff0b80007f32b40b73853f47fa94dddfedb30d84 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html dee7c5f11980087e7c0db870504f2fcdffc7687b823db8a1ec6b64f31faf6267 2
@@ -10829 +10829 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 9aa9a4df5af4d2bab096019599340ff7560f1c47d49f5803b9cfd20bfc718bb8 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html b793acd1fb98553832703af72afdf501db03b9e6431c9cde116a2be95eaae33f 2
@@ -10832 +10832 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 43edd3b9152fef54ca5d2127dc5fe9c1b02a4bdeeb36a2eff472c2ed5a3417fe 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html c51e687c2d2005eb826fb143618deaccac2fc1078916ffb6ba06b418a81d40a1 2
@@ -10836 +10836 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 300c2197fad4b55b55c4ebe03f96471d9c9c8dbc20a850efb17f6dcf5c36c033 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html d43f6cd297cec5b709e13d9b572ffb9d69c2f31a0f1e29eaa784eac6463d0714 2
@@ -10838 +10838 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html b909d856910a17b0472022930340f7d7f0c8cfd1b9fde59c221630203be785d4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html f9a2415556be9a0bbda9d47f54889d9c0a20911d97a9838ee8981c7f1a05b2c3 2
@@ -10841 +10841 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html bbc5cd12a940c047e3e41e2957bf641734c861e8786b0e0a956d66b69a433ca3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 88f31420bce0c1b35a2e9603d3fcb9e921c7ece3ea3f88e99b63f8197a41a659 2
@@ -10844 +10844 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html d61a95dc919fcf77faa45a0e749566c91cfbce4d5450394a6e88276ce74ac883 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html d7350b582f0b50e1bc53c9500559329837e420e878331a25595164509d12a16f 2
@@ -10848 +10848 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 852e2b6c3e2219ef9c3eb394f35cd8e1f31a8f287f5c7223b551b162fd5de525 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 076ead5390f184c31419677fe9845c7fc42fb85c3aec0b2ef32a92485fcf1445 2
@@ -10851 +10851 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 28cdc3cadabefe0e160508df4e90c151f9c17de19b290b3bd1f2ba1dbe014b5c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 89703d8976d3e6a161de5d30a9cb1a6abe05ab39bae7f86aed909924740afc80 2
@@ -10857 +10857 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html f662a2bb33b6e9cd1c25215dd3d42208008cda52e22b144359c773326148f02e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 79a93d3a889de790a077e8960ead2b3066867c9f305f593e26d9ffbe195909e4 2
@@ -10863 +10863 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 6679a3d5c9c821770c0e6b7897e304637cc90f6b4941769a0fd888f604e13f40 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 395bf73b4431b721c62bbb57dac91f4a45ea9b3eb427308597ff354872c12ece 2
@@ -10865 +10865 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 8be01d59f29c9d47ffe4a4f145a22bd6d23d33e043827d7591976a8c5f4af448 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 394c2e389bf99f374a51d32527434fd153309ddc3d30115bfde7fefb7cc0ed74 2
@@ -10869 +10869 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 6df0901ce8db83020fc565e898df0166b4fb7fe696da13a6ea73ef62461ad931 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 0583094f725ab096c42e29b18b49e206975559599a9228db1f19b5e51e76658a 2
@@ -10871 +10871 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html dda021ddc9058fc2f1957ba68f769efc0e5617ce3f018ee6157f92e6292bf9c4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 35ea6d30642a8111c328d4268cf1be26b6909d68edc497a6f33e26a922367783 2
@@ -10875 +10875 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html c96b058931dc34e86d9d9f8ddc3603a6f1c9d056301e7ea384e20164596e15fa 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 0165e73babc4394e7a9122380496a463e037a429115f84fa325328b6227fb942 2
@@ -10877 +10877 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html c7ebd9fc77d9c51b28f56f1785db5a64c32749b835170f79bda3a40cbbd5a306 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 32d60c448c7b1112ebda62b424f0f1bedf07f3b371fcb887033020f00ce0a15b 2
@@ -10879 +10879 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html cefe11d6f8f358b8c3d1af1281fb0c5eb31fce92367b15b984ccbe858fcd4b74 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html ffe31a83ebbf37a9a2ab0548c71ce3b5a43a233d0af42441cc09e8c0eb6f35f0 2
@@ -10884 +10884 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html b74158e44cd30f522f4ff56d59ec69daf89ae1dd797cff7cc27f0fb13c3cd2ec 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 44c6b9b5ef18612e74e07f590d988f7a2b1418218378dcc88b7ebe13a6f124e3 2
@@ -10888 +10888 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 2c82cfde07ac296ef2878942ed957de3e5c80904abd2d864c37690b0162750ce 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html ba8b23ed193a65c1ff5ad79e72b2c8f4b808610878272f5e2689f14ade348cdd 2
@@ -10891 +10891 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 1bec6115dab577de828ce7db5e7c7ed00812553bbede72b53097ee2d814b68d2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 159a6c01bee1d78c0968ca61eb4b9e82fb8c9e43bb4b5d7de7affa3baf70f58e 2
@@ -10893 +10893 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html ac42b142224b6124a44890e6a94030ce83955b64a4f4cca49de3bdebbbe41cbc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html e5e8fbf0393983e7cf6049ee832a16c82224fb4cd5762c9a0cfe75a94d78a486 2
@@ -10899 +10899 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 9b221b2496cb101d99466d182d65890e25b87b6769d11e14d98661766c8ca504 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 5fde64386d4ebc51e3dd674e425be6f9e453a56501f8a184f41871dc21a95f28 2
@@ -10909 +10909 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html e881f2f21f2f291ef8bfe6d76087e5a900315b83975f77a67a31c0191c250bac 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 45cf57fea27d19230c07ed68d2f6957d0a6322a94ab5fb2484906a6c35e51c2c 2
@@ -10912 +10912 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html d8a73f85677544b716bc63f26bc7a045f853b2eb04cf74105f2ebbb998d1bae7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 85c50eb12e0b4937b202e682c37f1e959962b39f523158b8fb5131254b01eac0 2
@@ -10915 +10915 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 6bd9a833eb2f545461ddd09dbb18aef0b2faa74f2197a502a8ce7edf5ce3e85e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 75be030829ef8743ade5ff1dae98d32df025e4f055796737b71097bb77f37b23 2
@@ -10918 +10918 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 1a76c69d1b57222b8fdd224236891bbd3073847e428458e677bec7b5d45f0d0f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 33af5c1eb03b16adc4e48b28fb6e308c2b98a5a90eabc903f48cf155fda819b7 2
@@ -10920 +10920 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 6b2d5262ad0870ee244e92b6cc5394a3da1eeb76d2999f144c2947bfbde29672 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html bf9d48e7a9664ed3faaefb0b28af22cddf4f051deca5e4766cea944c35eccd3b 2
@@ -10923 +10923 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html f4ee49df6a031e291069f44bf24f12658f5b849cfc410c7c977499d50aca3d67 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html d420a2287861125958c3c7d7d5706fb11882b0a7cf59c6409b521db754f026f5 2
@@ -10926 +10926 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 30e02f83f5a323dbc77409509e8699bbe2488c17593e7ed20d750c0429ed9f00 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 39aa196064a33cc8bb337691563375b37f383bf0070131e47f69c89654aa2d27 2
@@ -10929 +10929 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 23e35881e821cece31d50004e084b0902e74c19d35d262e215bab4ba3d5b8294 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 7363b28ff5dcedceb545aaf7034101f644e3eaba8821a0b52fd0090fdfc331f2 2
@@ -10933 +10933 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 5fb04cf0158dc678172f4ef4ea5b95a81c71ea4c99b9399539384485660e2293 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 1c1ee3871b883e2e16b2c869ac07ca1a83624c3878a23fb79a411ef07c49bd62 2
@@ -10936 +10936 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 704e470b0aac3c028023fe841207bf0d8db18b5059129b273595840de4263d6f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 6f3f9096ee0928bc95672fbd7bbf467dc7bc67c0a91a447bf9b5779c8e68cb8c 2
@@ -10939 +10939 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html 2b02ceb2c8eb34b6872e43dacbe62954c01c8c724f7f5384e5ba5a57e5d14fbd 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html ed02cd5bfb62be5e813ac358e96db179a435cb309f89110c656c19e8851bd500 2
@@ -10942 +10942 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 89705c6183bfd6688a7b5572d232377ef1b4903d2560c7c99c357710a75b8a3c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html c2d6f2fa436ec516a3dc81313f751ca6ad99c1f1b144689a956ed82eaa4effda 2
@@ -10944 +10944 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html fd950a8fbe7dd2530503f59f9c4d6757192a019cb61d85848a0b7db4e7bf22a3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 10a591453460a0fd3e8d2703963ad8e67a942b664306c0be1ae485add9d769a3 2
@@ -10948 +10948 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 946369ce94fbbea31d77a2ad6b719234cf6b01b591d526e9f8bbe027f7ce5305 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html b58cd41514346bf38f8c4458b76506df533ccd8d8873b1d6d464583fde4e2c3e 2
@@ -10951 +10951 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html 564894ea51d0196c491f4ba0195306865dcc8741292e109a0398ba20edc66014 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html ce0aaf1d41cb1537462ad7f06a1369829bfb7030fefcb8244ef87bdb2a58b033 2
@@ -10953 +10953 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html b5b24167d8d27e8b0aea811c794aa69c3e4e247560e7724283cf074bf6a19c22 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 4373362b7be3971c36a79be72824978831eef4c7c65bdf4d9efab42bccb75646 2
@@ -10957 +10957 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html b816043981473a75770c3f7c0156af2c178a9c4482baa3a168ee57d4ede7e635 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 01ee0123bf494dc7ac3f84df3f92551d66bd3884c5e1274fcdc061dde3a50d12 2
@@ -10960 +10960 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 62a8a9e5d57dc08c906d9bbef8687a2f830e973bdeed74241874fcb0a017e831 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 4006dd0b714af3baa111ea4834caec5910abe3dcce87b74c7d1342cfb39f5091 2
@@ -10962 +10962 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 435282a0158ce6cdfeca2efb020d4427a56b4888ce32c6edfa8e2c64fd295fdb 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html fa4a18f2f0e9065d3db8ec4db33d77ca7763c5b30c884189e2e163c61c15ead5 2
@@ -10966 +10966 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 985beb0c972cb5de9019ce794dec3a9451d3cb24935b498d3941cffc6cd634ff 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html b226e4fba867acbfa9db4e7a85f56edcb3eb74853cf8b53f1d5711f9d0f5951b 2
@@ -10969 +10969 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 0a15b927ec1776dab552d37b50675a562b3d0f967b860c011635fd67342d28a3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 71fe44269c9b3304092a8adca2b45c647b9b24dc10c3bed308148bb6f1bfe6b6 2
@@ -10972 +10972 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html ef95a82343516aea0cac39714f583bda4c60bf338dd0a73283b2cd0ab5017aea 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html bbf67635a15152ecd42cd7c5b997d1ec2f2f2364a6a92cc91ada8249a74a4a32 2
@@ -10984 +10984 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 543d9510b718c9a30d4f768e6c5c6d54d8e5b9c0652659bbb84acbe9dc7bd1c2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 2ff74a0ec9a18b381d74d82163fb745e939ebd7b64fca1f2047bea568e80c039 2
@@ -10993 +10993 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 2213e8da0bc4bae6b3a1cff046c922c213c3a1542b2da8a2378c556150ad4283 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 8ca2f12f8ff2e34f17bbb0db1ea52dd1bebe75e4dc8b303c1677302f159a046b 2
@@ -10995 +10995 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html f6932fd7b0bc850bbecc6838ecaa3c70825ed4205ef13a54f57bc66cc8121ebb 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 11bf1f5a3aff685845be604eb51f36c30354f1fc2f7be34cb0149ba835e2a640 2
@@ -10998 +10998 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 2922d4b6a933a1ae42aabcd24a50870d8e2e50d86efa33717887b012bf4d9997 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html f0035b4235941591bf053ef0c7cca33d4a2a7efb922f74d9363095733790c62c 2
@@ -11002 +11002 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 07646946674be5981bfa7f427555f8cf3e4558e97ada45298be30cdeeb3df72d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html ee3bd47c205cec6376413dc77f914b1c50304424fffaa52d0e6b7558fb651d9f 2
@@ -11004 +11004 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html 269bbcad6f4f667f06c144ed1e1df76599f6c08437fda88e2a0f75c23e2d5ac0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html d7358744004ca8daf1ad383453ab58d4c634e17b92e32652caa6e16d7b22adfe 2
@@ -11010 +11010 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html e234def7213c946884977b269f8abc06d7d8eb913bddfe778fa10d9baa2e25b9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html c37d2145053db9748e93b1ebc9dc53207f84fa963519f25abb5c84db6d1af2d9 2
@@ -11013 +11013 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html b282dde52fa3d309ce67787365a572eeb37be50c689b600bed553ef8ed65e3b6 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html 90d81d1ec7a3681465333e94f696b7781d635faec890b945e2725bc8022892bd 2
@@ -11029 +11029 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 357f86a8cc8119ee843fa0199d659672844aba4dc1e90df0ee06b28894e5d6e6 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html d60781dc81246e8469b82eb21c8580f60cb7c5438eefaec16523501ebac7fb69 2
@@ -11032 +11032 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CosineFunction.html 00adf8a825686ad303fe28e708c83e1f49effcc059edf603dec99a3fb51ae9af 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CosineFunction.html 374ab6243b5b3a4f483c3648fb6eb3aa62961d31d35fe4acd80396fcab3f1615 2
@@ -11041 +11041 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionC1.html 081f05b14a2fbfda645bce1fb638357f7a9a49381e67e01bc31bfd84c58b209f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionC1.html 2072e34e534b2296e8e997513237e5a2696cd5d727017536a9859003eecdace8 2
@@ -11044 +11044 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionCinfty.html adeed88d21354fb54446e42ae6e0d96a3a38fd0c7c1b9a8addb9e8f5496dbd38 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionCinfty.html 7180101e64309e5191b2cb3583f9300caae178df30307c63d5301145d54e3c7a 2
@@ -11047 +11047 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionLinfty.html f37d4bc9bf34e31011d31e838c91232ffc06ff1819e438cabcb6003e107579f7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionLinfty.html 7d9a2cd3ff98a3841969f64f63e7fd944fc8a705ae3c0a6d8360d3ae5bf58217 2
@@ -11065 +11065 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierCosineFunction.html db28dadc65e5c0670aadd40c3c3a0f8511b7ca47d82185aef513be9d7ec9e758 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierCosineFunction.html 7a70829bcd653fcaf86548bb79e16ce5b9442326d668683880daa13eba492bdf 2
@@ -11068 +11068 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierCosineSum.html 58a4e7c13a9222f1e9bfb253e2935b20b7d1e3db180d8ed5a6d9dee09d0e75c5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierCosineSum.html 3f3defbed2ffab825d4eadf87ce26302f19048b8789daf8eb29d7e0058c4edc0 2
@@ -11071 +11071 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierSineFunction.html 6d4a43a46b69d2c313da851c10121ab586fde31897956da1f7ed05b2d79fa060 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierSineFunction.html 5d89906a7c6895fb16f5080326e848788a2ba9f27059e2c1334a90ff2f7b566a 2
@@ -11074 +11074 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierSineSum.html db12a9cc8bedf5f5b9c54080afe35b13ed7057b7c663bb11c5e5fec051c8463a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierSineSum.html cd985b2412c5a9109570395180cf065aa102a3caefeb57487099d107893ccf59 2
@@ -11083 +11083 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html a804e63bcb88ab37fe1e737013da20c2fff2b46bbe50d9cbefd17672aa436836 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 33f2eef41917dc30944c0cda7abf479c07f1ec6a320ab079adfdc3b93ef9c2aa 2
@@ -11086 +11086 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 8593c9a18861bdfff7ddc5ae88d8e7c4987853f7382cfce359461b0120f5f9c2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 99d3dde663c36c8911b89dbfed85550e13550b43e8244ae28c37b5e92bcb8cf5 2
@@ -11095 +11095 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html df603982aa8e70f50bb7d10bc3be60f4cce0ca3629777b77f9778fdaebb22f17 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 0630184abee05b8ae165d4b4b2d999090fa222b233dcc6fe28e72e975d5bbc98 2
@@ -11101 +11101 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Monomial.html 7576262722c68eb12131be5b429502d7a73bf75eaa986a64ef90f00dea764db5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Monomial.html 8c840952d654b9367ad3ff0e7c34e3efe3d063c53f732fc544c8b9a2c9e988c7 2
@@ -11104 +11104 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html e4b8def8b719b6148931df3c835b79347e2d6cbcb02aa7a89fd75408bda35fbf 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html 02c9d39d6a3dac82b671d1241f21134826bd6a368f00999746086e4d0d4261d5 2
@@ -11107 +11107 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PillowFunction.html 9b11eeb6a19906f4686e1c5da532e6897ca72b890d506f7aecb3546adac5a43e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PillowFunction.html ffce7f89aadfea45d1f526fe380e1f206e623a1c04b40047a08a024cc1cee24d 2
@@ -11110 +11110 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html 501b97416457d3f85769c8e3cc86eccb0998ea7df8904307959052c82abcbc32 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html 11f62a536945842bcb38ebde20dfa4aa9188323a410e38c9cd6a7b9364aa40d4 2
@@ -11116 +11116 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 8a2e001bd0c101cbb0907417b2d1b982c28361ca719a620d317f10c634031070 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 1a9e6d39f26804b7e19304331b0ccfb268b4fea98df094e7059d0fcf425a268e 2
@@ -11122 +11122 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html 70483d559bfdaf3a6ae6829521bc6b3f235fe6e2c5b5a786c13ec50b100dc02d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html 33f9c4f90bc16a9595ff1e0cff72ce96d518820b99e7f9b73e69efac0bf9858e 2
@@ -11125 +11125 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 59c3f3c75ef99833cc874ee07308c7991083e2b15e85f6cda2737860b69639ab 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 7c42c1d3db1a1c35910eaf634266dae1623547c188b65069915fe9cf6c1ec792 2
@@ -11128 +11128 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html ca07511a316256385df41bb4166fc3908cdcea4f57602887a726db6475855081 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 0068b90d8a381980e9d209e235a608dd0ed3d6880735a62cd7fe2ed7aa04647b 2
@@ -11131 +11131 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 34673abfafd07b03f9f7f9eb134b01f755fe04be79d9f345f454b95ea5964099 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 6a254823f2b05682ec33a4ee527361f3509c3ff6cda85b472a58b8565bc7a23d 2
@@ -11134 +11134 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html ec707058e52fce596da79716210fabb72fa9d6a6a4e3a7ab3bbbcf84140b69a0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html 0c58045f25be6525fd63c87ae2211c85659d8abc1c9d88a3ea9795e7db21e8bb 2
@@ -11143 +11143 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Spherical.html 11495751dc54e3cd43954b8658072edd8586d351f14c2b7f721a4ff05a44bc60 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Spherical.html 7e6558c888c517f23ab752004734ca245b6dd5a9fd8694a9d3bc40e31141cb71 2
@@ -11152 +11152 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html b264c585341543fadbb22bf37d654b2a3e82a91aa06e1949007024febb576658 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html fb57ff6465787a8907d7914ab93a0c6f13c365ac6050eedd257a219092af15f6 2
@@ -11189 +11189 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 8429a9d093c1e5063ae2a1926d144b7246e05df1582a5ca5eeb77b445f57672d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 8c8f47b10e28071c945ca3130d18331cb4053fafe36998e9d148087ba0660fa1 2
@@ -11229 +11229 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html 68352da887386576760d0547cf17618a2a2875bae09ff8657a7c2cc0fe9eae4b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html c94e69340df3222f9b0b18f85a52fb8149ea8f1abeecee9daae0d995afff6779 2
@@ -11232 +11232 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 6f229ca1cc7ee80337e69023bceb166aed3df736b1674644201076097a2bb817 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html fb0a1ae4c0a125d31ca1f5b5b386f0f8d55cbaeccaeac8720ef1639df9e93b65 2
@@ -11234 +11234 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html c61c02e3a901055c3bb7211401afc4ef56a7df1b8a815885d828b07fafb824ec 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html 7cee836837e0bf829db7fab1070fc3a3fb115664cc02ce04d208a367582a9bac 2
@@ -11237 +11237 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 22844d643d856f2713cb3bf25e7f31d0f2e63e2f5e1daa0c2670132a57704561 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 0766344a489285054c9364c46b1c2ca6242b86712c31226adb6d692016f69d8c 2
@@ -11241 +11241 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet_1_1IntervalAccessor.html b52852212090364d2cde91a5b59a16698cb7950961557141d8a4a7f1a8b58ef1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet_1_1IntervalAccessor.html 6eb3740ad698dd8de033118a85e0422e94bafd5019f7c6e5714dbbd8c1e18544 2
@@ -11243 +11243 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet_1_1IntervalIterator.html 51bb713a135ba681e0152150fe79bea52d97b75086c5cc3b4b459ee5d2b26db5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet_1_1IntervalIterator.html b42dbc8ee8635d0ce62d49338dcebbe6412b84c5f3f224577a4a014895931509 2
@@ -11247 +11247 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 74be18d2b69e707013ebe583da95e45518ae6b7b159e28933b87963d09faf83a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 6a647cbb415621040afbcdf1cab9f9279efd19573c165b294006a144e8e37773 2
@@ -11289 +11289 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator.html 459ffa34e4d7e6f6cd4960f1743b4b789bd4fe2b8e7224ae55bd8a25ceb72439 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator.html 65a619ad5abf86c3c44dd7cd9f7d6e97afff65a0e588ee84d83cd956bcaf752d 2
@@ -11291 +11291 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator_3_011_00_01spacedim_01_4.html 2c7c008889de92896809450817013c7fcb82400c1026a2c98f927e0af3f620cd 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator_3_011_00_01spacedim_01_4.html 92e63179b700ad2e4c19d3f6dc2a4239a2f13132ecce05cd1a0de6a779a59af5 2
@@ -11302 +11302 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html 4db8d6887a7282c0fd681abbc9d644d906c46588378a60ca6fe051dfe30933e4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html d030eeb11e87a48e165fa55134e5766866ef1a4ceaa40d95d36da11b4fb4c913 2
@@ -11306 +11306 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 109e83ba5165e150f5e48f7cf7ab6c9be854dfa9488f46c0c67d1b4b4abc0426 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 321f38e49fa5f585555a0f6ab54c10be07cfa35f1a1c56eedc04096d4ae50d44 2
@@ -11315 +11315 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html ff3612bae413231ce62a77d1ee170801366879df8848e8ea3ef2806b150007ef 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html f96411403e0228bc25b391f948ab07a3706795fbee86f0d8b2b18f96a6720523 2
@@ -11332 +11332 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 86c5884697363428da9fbe8b375b874f9c6b5bca1d1a79c2bc989d6df42e435b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 750362d867d42d88bb307002f71d34148e3928f164bc898e36c7c54dbb70c75a 2
@@ -11335 +11335 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 7bfd2ce0baf314ff0f2da2e7f6a44d09bcd59b2c0c5a403ba4b3cde20a1dee98 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 719e0c69c89f646c170f12d3722295a51b34e08cddb98cba4f3f0c0cc2d952e3 2
@@ -11338 +11338 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearIndexIterator.html beed98ef7a8e959bde6a1f42345aee32128d629d7ada4c5eb7e762b1fe819298 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearIndexIterator.html 2d98c0c69e6991ec5cfc63018ad37a0bb0995f13182389067001a382a90449a0 2
@@ -11341 +11341 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html f2b7e8ef66046c9c8bb236e979c22538df73e95075b8cd1a3e09696d8902e4f0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html 16194620b610792d609cc82ad41bfafbcde9741b9ef6db8c2b8ade1c47da6d82 2
@@ -11459 +11459 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html da2d5bcc32aac6f16b64062a0ecd5948be98793a5c1661b85bd4a61020391a53 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 5ddc062b21b4849da0c117eaaf4dd2d253ed285c4b6be2e881a76383f7f7a2e4 2
@@ -11462 +11462 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 629e2887b8c4ced3da80d8c08260beb5e400e12696cc76c54363001bb16799ec 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html f0ba1e9b4d12c206244b68869a522f624a98325761ded3a256f0676f3ae857bd 2
@@ -11464 +11464 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html a5c0c5f1bae89ec7a9ea1fdf7c9dbd2dd1a294cb2b1934e4cbdf422595a14539 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html bfe82e98a413077e64d31b47345240d05aa2f23a34c3f8b4beed62d32a5eb811 2
@@ -11467 +11467 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html 321b793a91d96fa7930dd9692df2fde717a746a7dc676682a800c133a70eed14 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html 0ee9a8031c3838f6d2da65963597c739f69d40ee56ac9561e4c10bb4e4c4236f 2
@@ -11473 +11473 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html ca29f5f60d832327e3ad7369560fa1da5b3a528e19e73f992eca02f8c131cc64 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html db3c406e50434cf0395a5b9312169254300b31437b106b7092adaf04024efcb2 2
@@ -11475 +11475 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html 980c1cd8a80a047ea63cba9fe32d15e7bad01f89422b5a72226167f22adda2fa 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html 0fac9fd035fe095a9a59d35458f0f9cad231f32b17b2678496a2bef0f52fa63e 2
@@ -11477 +11477 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html 1dacd9d90dc9c008ee62fd82f421e46f8d8262e42c8d21c8cd4aae2e524b72c4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html 2ac22cc65706cee7b8cbfe7d8abbccc551f874bb11cecfc0700e6d2877dd6b27 2
@@ -11481 +11481 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html 740eeae45a0d0a90eea191b49be94810233dcef60a27f98ca28e436b8b707112 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html 2a68198486fa8bc5db365e00f6551fc468c5909483f4adae249db11fc72bfa82 2
@@ -11485 +11485 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html d074066cefeb1cd191e40ea7830c8306a2c0fec0f8df71d94ae18202acee0e7d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html 6a1b38fb0621cf35fc78fc3c0d1673809c4f2014e26224d41fd12ed5b5acb706 2
@@ -11487 +11487 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html b4cddd39a2cf411e43a2765b1e14ead4b288ab8e85b739cbf6d0b71d70a59b68 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html c68a2a96ef537023f79ae1ac5c13a2c19eda3a9dff9f6a93ae8cd41561787405 2
@@ -11492 +11492 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html aec4e264b3915d06359d541e7b32be2d7b325af96837288e88b9f8cb0c75e199 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html bc6bb07cbfc50f47d36a0c39f61a14e1584b3c5f3cdce83f38656a486890e4a4 2
@@ -11494 +11494 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html b3e9c153d2d717f90b7c03493f76b176834b019c9ed9f42f85447b1f81517679 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html 12149c302b8ffd9b26b61faf7662760550f5dea848e79beacddc9232657bd0fc 2
@@ -11496 +11496 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html fefd4033beec801b60c4c1a6246403e863217da7e39a6656fb1cb392de5fe82f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html 349855921c23d87b70805f25de4f036f07086f688edbf2d83efebc64f7328f54 2
@@ -11500 +11500 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html 003f1008344c6001ee8db21055f4a8ee000c6728b55f6d0fee412965c01cbab2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html bf722b0b15acae9152973486d06877456ae7a2a9af73186390770a07862a3619 2
@@ -11503 +11503 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html 6ab7ade22b86625c7104429e4c14552e2bd9f05932db7925e8327ad372b84492 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html 33f435de2636822f7ccb56f4122ab2c4ef13cc3721ad87dcebf8c34f0e3390c4 2
@@ -11509 +11509 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html 398f04a3aa021d8bc3a9dde1e2ef9f43295f3a2edd9c4a93bf1b4b5782a41a5d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html 3397206de4b9760a2091fddf3825a819287c9220b364d6fc370c328202aa5138 2
@@ -11531 +11531 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMatrixFreeOperators_1_1LaplaceOperator.html e7950d418551951ed4857e70d82294a791327ac290ec698ca51c82115ca8b6e1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMatrixFreeOperators_1_1LaplaceOperator.html 84fb0ffbb9fda2cc064abc250e17589f734c3ca7f8c4e41a607e16ee69ce4e72 2
@@ -11576 +11576 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1Assembler_1_1ResidualLocalBlocksToGlobalBlocks.html 5a4904c6c7168c1bd854a313146bf8d1192687720cd43ad8d75306312ad743ea 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1Assembler_1_1ResidualLocalBlocksToGlobalBlocks.html 5fe6931d5ae84087fd98df94489adba8e17a19e8b2ffa6c59c965cba43e8d7a2 2
@@ -11586 +11586 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1DoFInfoBox.html 84fbaff632b0ec81e1cc0f65e5846ffe3e7917061d533470eda3506327bb95c3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1DoFInfoBox.html 716ceb502842e815700a2518845586f4664d28f5749ad25fb05f5df991a04d9c 2
@@ -11631 +11631 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 1ccd31a7e5ff961183c0dee1804ceeae26fd94e820091ee04e53ff8d5e26f14c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 346cc5c8c502767a074a03d46b9af49a49a5c9414600669ee4c563c92f673bec 2
@@ -11634 +11634 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html c957e052be74867d5de43563820883a3489763e98bc3fd7de26e51a3bd3f0f12 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html f5ee005a811cdeaf9b2b4c07046eb79872d13ef085d29af322ba2de3dc4d65b6 2
@@ -11637 +11637 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 435782079a43467439bcdce730b9a5f8dbb549ced2f2a9cf8786daec21c33358 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 481f85175493fd3d61c90f161b7979dd0b310eac9a97a2a4042046ca2f8ec755 2
@@ -11640 +11640 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 523dbb3f36f2d3ad3ec27ef793e44f7d1f243c2cc2cef6c398f0d600b08c8978 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html d81028e937910bc5727fd780672953d1f922ac589756b989f8e6fdab2911abe4 2
@@ -11642 +11642 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 2f7b06ef670e2a940c1b3c3845e07a0524b7b9c75c22a5e32d5b63aac36297c8 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 2f8a2129fb7b0aeb98eac02a4a0edf66ad13e11109f67678ec6fb3dcb7c84198 2
@@ -11644 +11644 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 3d842b15d6bc6a05e3ac2bdf5bb63a0768ea212699c640d9134c05d0e69da436 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 6f74bd876546850632231ab70a9b5f208cf29aeb4fc1b768a4c5ea11710b39b4 2
@@ -11646 +11646 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 026869b33c1258482f058c565130472fa175ab2e0c2377baf0bca4fb817634f1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 9c4bb2719c5f397a81b210b2ca2666c5be84518e391476684fdaeed138f9cc4e 2
@@ -11649 +11649 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 97966e83e7f1011bc4848c89c8eb0f75048e740365c4a72fac0cec68d9a2c49d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html f229dc282f2a1960ae87800d0d43ce8c7471fd1341f6c09ce3e46bd9437ec733 2
@@ -11658 +11658 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 853043523aa3558b5072e1caa3715058003b2b2e07f8ca892acc4bd7458c811b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html c7778649a98cf3fead454afa9345dc6c599f9603d03a0135203a2e6111fd3b97 2
@@ -11666 +11666 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 1a43edffaaf095ae90487008cbe36ebdacb804d83aa27c8357e641d16ac751f0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 65410a3dca364e8045d8bbb8cd9151945ea82c9016de55fe9758ed0d17c7fdce 2
@@ -11681 +11681 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 0cafd0b4405fbb2ff36a1497ef673fd7e45413143c2f2c84f7f6b1acaa852b90 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 62d727e513e552da0e43b8fe569b0a3b076410c05994d899dbfff9dfa5e8df46 2
@@ -11686 +11686 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html ce6128ef14e6ff322e2f035c1b5de8c796385df2914a72e4e06292de7ca63714 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 916e7ea5293d237b6657b70c7806423ee4c2c32a9cd89de8aff18fddea6175d2 2
@@ -11690 +11690 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html bd916f855f39537c8a6b086ae22265293f1cddda0c82c00137c6c9b6b0212ab4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html e7ca97628bd9c826d9be6565381be102ef9dd73583c5f14ef512b53fbc8160c2 2
@@ -11692 +11692 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 3578a2efc39e4dbdd080458d6c4c2ca4b5b7ce212567f812058c2279c902af1b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 7f28efed3788ee4dd470596d421e708535a05e7d6f4c6bff3b0b3ec8afb57d3a 2
@@ -11694 +11694 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 9db4ee3d774fd25f398c4518e89ae1c3a19fe6293f739979d1a5fa9bf100e763 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html fcc578014c98146f6ee0117f9a7712c0393104abedc1d9063f62b9e6724dd481 2
@@ -11696 +11696 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html a80c751afb8bc35970d51b246872cdc811461377ba2c1fdf1aa149e373fa678d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 5d6794eaa0e4d34859b2fc8ec4f13d322ac415c1543e5c0e0dbb271894e63860 2 @@ -11698 +11698 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 8ab406c0c6536d1342b32031bebcc3e3f68d4b7c9d9c85dc2c2d8ae8f00eb2d4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 99a01f4e6d62b21d5985237b15b775bd5020fcd4db79e377cf524f1cfb333ae6 2 @@ -11700 +11700 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 398908948cdc5e47b86c516fc132c1362c74f726c8ad56fc39167f5a3412050f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html fb393de211c703b0c311e4ef91b65c69f15820cc2d883ffb958209bc43a890ac 2 @@ -11703 +11703 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 8ff7f6d48df80c44cf4a15a62000976d12e29017033ee1a3604f3a0864d3cba9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html de08e66da9b574152b4e35c48928c37b4d4c7826090e2bb0af451d4504e80f81 2 @@ -11706 +11706 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 660dfe58dc943fce9cf906639129c593b65d1ff211de7f0854b2208de6263349 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 6527696b065940e4ae5fe18066f85343798741acbee33f7aa635a3e9e159d8b7 2 @@ -11709 +11709 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 7fbd6dcc7b345d5d6eace218fd7dec3252ccb90f7228bb905e145b1e750ed641 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html dc2e4e2bc870f1fd2777e1f637ee74d1a793c5c9f002cee45cc732f2c7babfdf 2 @@ -11712 +11712 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 50bfdf1fd8832f2ad518a19f9de55ef4f8c9484e18b9717dfecd82dabaabb923 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 1c9c437b99a75e0099f07859f00627b2e0291d87e3eefd307929e052a751c8e4 2 @@ -11715 +11715 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 737b82719285d6c92e7c93d548efcb74df3fb3dfb4c873661668fff966578ed0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html a04deee944ba10b1f2b57fea8c839336bcc022fb02f525a97414016ec1c2f552 2 @@ -11718 +11718 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 71a75902a3c27dfc717643d012158590655fd24751fc885f8babd335c6d385c8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 6d8d5c3addae99b9b1c96bdd27473e3157d70be56460add98aa5daaa17acaa88 2 @@ -11721 +11721 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html f2d2736a04c8f03955b2fffb0a3e862dad036ed1f722cf895193fee4ee7e9ee9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 168fb3868873f378d66a31d4f17ccce950d0ef77a16dd7067f5f7903b0833b4d 2 @@ -11724 +11724 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 
5bddb5d971d93476597e0500502f6b81bfd2ff0e65016769ed47afd4b009eb8c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html b07415c1cf6c29e02a2ac6593f486f59cd619dde18630659b1523a13cc742ad2 2 @@ -11727 +11727 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 69b28b90faeb32856d305251545059379f5897b2aa23b2564816ff7fe7e10966 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html fbce0cfb56a144f80073072d74cc511192348a381263ab4cd9e041c100a54cee 2 @@ -11730 +11730 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2e32392fba54174a5575d95c41b0101c12a5c5451b4223b381ffef245dde8f60 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html c3c461390d804ce96507153d588e8ee6e2a7eab81b60fa17cf2a3a19a28535cb 2 @@ -11733 +11733 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html c0be3260d0452cf3d9e641e0d3e4c47c3b546cbe950522bc608cf296fc6d87c5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 5af586d93dc187c1c0c4c673985ddff608d4eec5c2056ee9a6b2daa0d4b4e020 2 @@ -11736 +11736 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 4f5b4ac11c951626dcc86d5f83eb67950173dfa36ef0c629d2aef3239bf96718 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 62777ae6aaf16d68ba18e2245f65e6f9622d387d27b9792e2c12e59a170e516b 2 @@ -11739 +11739 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 0c889583a31338c07b605e98543063f3bca48d6cfa50c621d8ebd16b9a4dca9a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 8d4a2956201b35627cde5c4aaa7bb687d64a8f2cf211d153468206ebe7f879fa 2 @@ -11746 +11746 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 1f3d9bcc6c5a4442e19cdbe5c5727ef563d8b0045fa4bc24f81481edeb114464 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html c11ad176b0bc7b5c74fc33ba7acb116661c1336a5b15af24394f6a7d4a5a72d0 2 @@ -11834 +11834 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 4d7e88fb6a97f203b0013be0ebfdb7e791ea2981f57b2ddeb3fc62e1f8ef927a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html c415eeca8a3004a472cfab9d8d0a8da3d1342b3f8992500bdad1de68de90f2fe 2 @@ -11837 +11837 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 7321ff5b97514cbc6cd704bd6ace098a094c5a8326f3273a111ebf95c48e2b70 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 0cdc70384050b17fa9f6d08be04ec4486512103b7b77e884bc31e22d6b68be8f 2 @@ -11841 +11841 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 7e4b27e5428bfd5fd53547136ea89a2368bba8952c6115d7ddc00f378161e654 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 3496791a2df3d8dfef1ab7888419fa963d6b6591deec80c2be903132f39382ae 2 @@ -11846 +11846 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPackagedOperation.html e963085a5a65b7eda2dce277f2e7fe10c02c5c6bb2163c1992fb937c829789f2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPackagedOperation.html 79882520177adc4154fcaa365ea83a9aad1f4f9636d5259f419b5569718e1971 2 @@ -11867 +11867 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 93868acd403bfff8150f44d8e781e01e99e3d4e3619a89ac10f37256752a5435 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 09f491a9560da85c2233f7069ae2e1bba6baa74cde01a203bf2b751235e61c4b 2 @@ -11872 +11872 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html f601576018fa5ea8935eb8f6b3d8de0d876d3d393fdd35c6204800b10d08fdc1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html fd367c7121af8b0692c99a6b00c4ea27337afcf387c02eb6f9ef98a9f07f1e55 2 @@ -11913 +11913 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 80db2cbfd56ed98a7464212f991a18b80d1ea26aabddc28b44ef9bf71142d68d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html ddd738b786e9d01f9c442a83cb848098cfb030bdb90ba365535593f37f0dfdc0 2 @@ -11918 +11918 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 5f5a2324b67bc1934fbcd30c03e2bbaea3d201ec0ba25cc780821127abe5f769 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html fe15a8a3675e50293f4cb02fa358769e82b5dd34cbc53c6248b76dfcbdd8cb9c 2 @@ -11923 +11923 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html a2d59916d324a4e97eec33de7246e819b985c1aa813604892d1dd5807ded3485 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 8938d88efbde4611945057a1b71213f868bfa58eebcf70903bd9d0ddbe93248b 2 @@ -11935 +11935 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBDM.html 07c61e5446cb4a752478e04b7b18430e1b128969ccb7172d0c3360016f6c0529 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBDM.html 143a998c07e881fc597fd49974f9a7ac4d42b969739c38e3d82da1941e8e13d6 2 @@ -11938 +11938 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html eea3292f52e1ec3e9ae7465cb90f6996d9fc03bb597756a7b60814f66ff3fd0d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html dac2687b57219049778298bf9e3746d7d3a124dfcd24e21743f8fee57ccc712c 2 @@ -11941 +11941 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html 65ff5960c4f6199c0a533798f181c8a8578f607d2ec9f3e599441a1df2a55bb2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html 3dc5f27026dc50bf13083b6b4bf761579a709095db24a123eb9d22783f0f5c52 2 @@ -11959 +11959 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html f67edd4a474e31092477b9821b8de07ac7aaafd40df0eb05f2b977afcaf60a38 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 1c45573804aa4c495bcd58c9a5700fb52cbcd90366fbbd3c5174c609950c6a8d 2 @@ -11962 +11962 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html df8b783afde5b81db19eaf6615142e044c2ce53fec73b5e7c691344204c9a12c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 7acc020bcb1b7490f0014c84dc81b317db483b381f79821691625c5c75407e4c 2 @@ -11965 +11965 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 2900afda1322cf2bc9d02723f2e047113f996b62abaebeec0ad2c87a0b48ffe8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 3847ebccdaf21a75a797508d9c63dfd7521796ce409d50d13161b26b8a430250 2 @@ -11968 +11968 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 50d882a2dd726a54a57d228c35a01d7ffc406d7155b963e13287a46b14b17d57 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html f421ff497736cec954cc0db88dfbc86f1c8826c6c4b7fb3f7f265018057f3d3c 2 @@ -11971 +11971 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html 2c61e2873b59e37e22ffecd28019bc875fa6dbbc49729498dfc470767cf52d06 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html 72338823e586127a3d078be2f4eadc5cc3578eac1b2471af6e0222d0c0f9306b 2 @@ -11974 +11974 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html b3c03e914957374fc3a59f3fe4c2a726d91bc5cef33a92aed9afc6546e1538b3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html d8c748c1ea7f3afac7b9294aa4fd97e1da503ce508f24cc26c60e4a045b9068a 2 @@ -11977 +11977 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 74f3032a1618fdaeb67dfcc2392fb61d2e1b549815c6bbb2568d5d3cb9a9becc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 2d3d9656237685f6229850eb2c344bf1ae9079105b9cd06c54b68af59bb1a850 2 @@ -11983 +11983 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html 8908e0487fd7bbff55bd8e712a9f7d7edafec6669159d01f9ba057bd1c746581 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html e9f84fe029f053ea502cb6ff1590c8752f0335b49deb8c8a4e6873b62d4293e6 2 @@ -11986 +11986 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html fe6f16a7f7da63300605d3a52b040636683c0ef6d1b67432f211edd03cdd9e15 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html a1282f31b327e0561717b621eb1b48cd30acc6268239ae398af92690359e6be1 2 @@ -12010 +12010 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionChebyshev.html 32d999c1826ffd1dbb8bd14531e78790bc6851fcf61fccc37f13a52eb2bd8d8f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionChebyshev.html f58d0c93e1be0869cac9994a40c31ba208c4da4d2178c95c4701b621b94da6f4 2 @@ -12013 +12013 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionIdentity.html 9b1f88e09703b45cbe31f9a5ae4f432a023f0f797fffa3ad8a7fc4a0ab581336 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionIdentity.html b635768465cafcd2c135ac258ede83e13164292d1f8b555143330438c0905057 2 @@ -12016 +12016 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionJacobi.html 30226edef99ad9104854ad458e95b84589e3a5a1f7c11f9db6e5c1c1f7ce45ec 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionJacobi.html 5586f200cc0db2bb4d94e690e75714bf1c06d7505fa6ad1f7018a5a76fe4a102 2 @@ -12025 +12025 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionPSOR.html 42bf8055c4d038a55718c9604b6441f70f709b32bf810e8bcbe20382b8c7854d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionPSOR.html 2c1b0a50ac47e404f9238f27d8b80c3fdd80efec9d3947696650899b3d4f2cf9 2 @@ -12030 +12030 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 8d4f82633e8d8dccfb35186af8014ab2050903f68c21aac6f2c7e44f98b9a8cd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html d2495a01dad7fee189afa00f9090dfa964884d463601feb701c0048f00775f98 2 @@ -12035 +12035 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRichardson.html 2fcc3eea0c1e93320641bc17f942cde938b84e4f92dd06f26118e32e94faa492 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRichardson.html dddef3fa5adbe2676961465951e6bbd8d219a18b3f7824cdddc2ebf2e3a11301 2 @@ -12040 +12040 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSOR.html 52d92e48850b823bac590501c56316b0e67110edc7ad70170ebff5717acf2844 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSOR.html 6bec3f5959e2d8f1926fbfa01ce229faef9b1fb97c086a1506537de13e3723cd 2 @@ -12043 +12043 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSSOR.html b4e9584adbba189cb8ca2991d48d7663bf98abc3f0983b182053e80d18a86f03 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSSOR.html 676ddb20950269ff6cc1a6e40b5dd202674614fe0d48a8e48f0273542ab19a9a 2 @@ -12046 +12046 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSelector.html 778f3b5d90ff31c73df582fb56f071809bd5fb5ddeea5974256125d7be47ebfd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSelector.html c77ba38c128a5363066ed5b2a6e852aa29fec3b77333c6da0b1c15d9042489bb 2 @@ -12063 +12063 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 5a956e4f6e153e9288b1202826e328f44f6a034250c630c49240b83834a01bc8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html efda8bb72ad79e6cf57ab33669ee4244eff414f31d5497c3778cad16ccc2353e 2 @@ -12066 +12066 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 8b549352a480fa8e0352fe44b1dcf409368972be8636fb0de6de33c234e101a5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 62bfe0d5571a5e3aa9bb97e40b0a85c860cc72c1a5216d8e80b0bacc0c3cee7e 2 @@ -12068 +12068 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 536e6aa768defbbdc5331557a331601d5514dbde83e10fd2bf1c2e5e4a243d7b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 45af40b87c5dacce27933ebcd782bb607d2efa5ccd16be36b1e848cb574f0ba2 2 @@ -12072 +12072 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 1ee6899dc58ec06134ade83f4dad54e4ebbc5e3792191bf2859a889e4136dd49 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html ddeee83d9bf73bf0777e71e1872ca576284622b4e9310fc19295383bf1455dd4 2 @@ -12074 +12074 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 9b44ef4353de8dd2dd18ce62b7d2d8873c9471a0f35ea1562173bee06761295f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 1de0bacfa1771245804b86fb60e5672abfb89c9e58459f66bf2ef2320b8857fb 2 @@ -12078 +12078 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 551cad4748927870253c28f41833145f656261521d663a277806af3726bc9b1c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 9f7f8e9698ed787bca548674f3d22f3e8d5c78a79a47eaade6920748eb9f1dec 2 @@ -12084 +12084 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 77ddca14bbcb5d199553b46915b829efe11249f4a48feeb338f11aa8948bb9db 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html b8d7174337177d529e912240458c5ea2fa6d21fb24fec8fa1b9d035ae76ab9b3 2 @@ -12110 +12110 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 4c81ea0035af778a69141c8fb8f612b51615501e4bb55d44d58cbec8d2490f78 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 
fb5a9179c956848f32ace54942aa21bc2a7442c0133d697253c337c70b1600c2 2 @@ -12125 +12125 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 719c56a8ae7567814bee8018c13842715cfa1d4dfb7a5b09026505b022c84025 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html c62b678f19a37b4eb91c39a929a0e7a8a9e4fa94e7f4f39970d1f64f6a372d50 2 @@ -12137 +12137 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html cf7642468a4bf0326ac74be61289456c2c11d5db088af6629cf5e17c10d924d0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html b017a6961a6122d1c1ddc7526e2b83095e738b7030770bf9ba7e24a1217e9730 2 @@ -12140 +12140 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html 1bf99c4c3d4fe011fac70f0d2357ef78c8a0000d4809deeb0a34f40ee35b814a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html a405c5b08b5005b854770fd1e6a87670a40d82f076e4e5ceb49abff293e2f276 2 @@ -12151 +12151 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html 53d62f62b00fc2efa042a091b2e153c58755212d90917cf10005bf1681bcc355 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html d02864516699652a181f328eb9d9e8044d2eba3b6b221a6c8518e160037435f2 2 @@ -12189 +12189 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html 610a6a136e62f27a2cc1fb78786225ff6a56f8d7acd4aa346a5e8bd86d7d6140 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html bca4ec4f99925ce2c6c352b200e2e8fa28626c4876460d7954b75ad45449cbee 2 @@ -12192 +12192 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html cc14ec6fad48c55f1aab03c71df8b13db317d6710a9b1615084b26f8b2320be3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html dfabaa77eb662a9992ff2633e9955318ab2ec20c47b4cf44df5061af866855c0 2 @@ -12195 +12195 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html a066326f1bdb1e70f751ca0c5cf94a4dc6ba096c32882dda0bf7cffcfa0448f4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html 14cc610eda3d9563adc11fe51f756cb289a6d852d27fa5460610bc7cf79208d4 2 @@ -12198 +12198 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 71c991306d745246419f44780acbe2fb762089df4d64fa1e4d9e991c5173f7f9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 6060b0e2e28a7662d6eefed01596a55076d706523ca6a8974f47246f7734ec47 2 @@ -12201 +12201 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html ba36b503b2a3bf4403db60888550efb26cb8ec9882757d24df00d12eb89ed81f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 4ee029fbc2eccbc1df232e8c44b3ff8228f7320c4156394d33bf7af2ace380b9 2 @@ -12204 +12204 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html e5dac94abfdeae9608fbdf33de462d133f64f2d985a3e2ade8753dab03bf3301 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html c9dff97ea2fc94c68039def6f8fec620a50005877834afa4a1ee03e899b0d646 2 @@ -12207 +12207 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html a5a6e55b63a6c5f40afb5a6e3673999079bc68f38a7ac00c5f7cb22bc221bdbf 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html e5e77126921a16cdcbd71512fe1f4183005ec8a6d1911611a28bcd6443cb13a5 2 @@ -12210 +12210 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html fb4f8bdd18c44ae8f75e23b36ac3e324018c2623b2b127920167e1c70087038c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html 28d610ecbe1a470b2201822e7eb5b3e3ad1cf07c0cf431519aaf2e134ce0fabf 2 @@ -12229 +12229 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 3cb344f76bba75e11f121ae359754a59c366b4c02b769e337390422b32f9d991 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 191671896957b7218ca34a7e92a29b96629dfc53e07c5f8265f72d5821e4c45f 2 @@ -12233 +12233 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html af7000d325c25ecff4e1e764439eb58cbbd2dde75efceb202768ed3e5cf0f5f6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html df75e397594bebcf0d22f3ac6fd108055a72858279ca2ad537a8ab88f3833aae 2 @@ -12235 +12235 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 528bc7e4780c289a35f36c1004cbcc51f5fcff609a24e21122cca0716bb1be61 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 89c1cfba9fc7c48552038abdc56c272e923d6b2a9210339216a14e24a57d74a4 2 @@ -12237 +12237 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 3e88b99c52b271db29c84ebfa26fb5c5a8058e3d2fa91696a5aa6e2e7c0c55d3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 9e83a79254fcf8c4b340f6e212b6168ea9e417965d047cb7a70c40509661c9a0 2 @@ -12248 +12248 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html f7ab6f15fced01be807cea1ec1a22994ea4d2a6951a942f5b29177475e1a2ed3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 90d4b185bf888b85211d9dd4fd1f0834231e5f7a4af55f2bd1506737625f94e4 2 @@ -12251 +12251 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 8a9fb9b83cfaeb688362c3fbb8ecb33a30bc2b683cde0c2d68a088740a07cc7f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 6344e95ae8cd5d292f8a0150b9cc81e4e0ba2207332265ff70d6c7bdf30732a7 2 @@ -12269 +12269 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 8fcb1623cfae3fa288f4e437d6b887a89c166d7f373aa15fce6f6776d130ff7b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 4479d5b020c25ff407c732db11ab209e6694b00f8793cda0c46b95e1cdeff65c 2 @@ -12290 +12290 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 5fd0f6a159ef5a0c98e2198f46deacfdeaa7c2e9240736e62ef6bfe0b1570229 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html f386e63760d2a2a85b9dd3999e0efb9d42c6d139ad77eb5720cd649b26fc3e6c 2 @@ -12293 +12293 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html c2e643aab34e024e671cc507e680e66c0ba9da0b76724c38b98966106f5271d1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html 62ba3f00b2373c17e0edf7b93bda054d24627e63ef4b0f35c92e69f48f100f4d 2 @@ -12311 +12311 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 8cfeeac30fb1f22f4147037fa5da6a75e398538648685195507f5f05a10d93b1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 
83e908a1e1c88cc16c7a8d5f4b404f8776b8a47aa889a720038ce85fbedc03cc 2 @@ -12320 +12320 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseBlockVanka.html c3c458b07a75f9dcbc05bf5db3cce0b4571220fe38010837a5f5b006a653049a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseBlockVanka.html 8341020f9f5a94ea2661d20e415e44cba8b4ba7f519b6042b43a802f874f1ac4 2 @@ -12323 +12323 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html a0570d12a352d02a11f07df807235154d99e43fe6c9ba2358647f5674bd05d9e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html a5544592fcb9ce7d365962ad9ecf97f96205f13bc3a646553a0060ad686aaf06 2 @@ -12327 +12327 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html a1c1f7232629bdb3447d6afb7b9d186bc486421162560623e5ff48c16fb5cde4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html edf4678b760565bec9f2703c64b8a753081299e58c0f6d80dccb59a12477f17b 2 @@ -12330 +12330 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html bcb23af823fc6a66f9abba0efe1d130edee937cddcb7c39682b3ac7d51e6d4d3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 42b07a9608c77f69dba7e9ae0e7c49255a11d2966016bafd499e40505a7d4811 2 @@ -12335 +12335 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html bf09e5d0e8510792a48ca097ef975add96a0631f22c43d5a1492e36cdfbd123f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html fb66884d65525b9e70aef4d12e04b248c7810af71893f2c9d172d0c2a29414a4 2 @@ -12338 +12338 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 364b51f3cea8edf727c9d2ccd337e4f78877efea8fc716e3f9c72b763684bb78 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 243202678585777fd7625ac13faa98cae1d3a4a8d4acb5c5cdc033d1d639155a 2 @@ -12340 +12340 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 5b0c99d88b53cf68dcd4dbe729a5cdd720222577e52c464fdab98efead4d0114 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 59bf7def87a936937382dfd346d1c675c7ecac093067d9fb5bed2ab5bee98037 2 @@ -12358 +12358 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 6408a0dbd8c373c2d38d46c7d8d96935b343475e305bfc61dd0c4d205ea8d8cd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html e6bcdd90d8e25ce88ba3621af8560800744f72275608e944fb6c83a59b6665cd 2 @@ -12361 +12361 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseVanka.html 135eb43a7ef400a91ce322efcd6fbbe2de065bd68f27afeac991d7ab63ab3f75 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseVanka.html 55e90597c8cfb59077b206953e651f11670843a7bb0569b7d114151b34e82235 2 @@ -12366 +12366 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 528b02fe97c5c78a274aaa5e9c1a0c7bf8b968d54564a510a7e366bacbe28cff 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 7ae9138d3df50ec16591104e4b46363322b24603c461f47fe3ab18322ec90354 2 @@ -12374 +12374 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 94e7ef03689217d1cc1a8f49da95e6fa9fdea12c42c9bf4e2b860faa5c7d5180 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html e17fdc68759bc34782993eb1870f65f8a4cc85d915b74265ed12026df5cc499c 2 @@ -12378 +12378 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 
b7d3689c8c311407e0c1e940856625bdf200f5bfc6e4ccb13a0981568fd091cb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 13440622948a45f0bb719d71b036c312984c3f29fa9613796c2cdf7021963080 2 @@ -12386 +12386 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 7c540e24b715029af37e9a936a993eebdeb3b05950c8618f2148c5352fc50f34 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html c148a5de962a31f757787bbe80e28f67df1d20383d032873b86657de02e1d45d 2 @@ -12390 +12390 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html d8dca06bb263efd7ecb98a2e599b94cde9d3548fd84fc8f64181ca0d240c4c3e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 276c884a9b7ff0c9a5ccec41cd867f38b85c5dd25ff0aa84b22f329c8ef88de0 2 @@ -12393 +12393 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html d97277f6ff312775aa10d9c82f86215ebeb2b8f62756c395fd280b0159838767 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 9372a95c25861dbf085ea58481c6f83eb73ec658177b01ae31abeeb038d6116f 2 @@ -12421 +12421 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html aa58b6ade7c3ed17029876681655af3e948fa2a9e3f2eb47365fd833fd67f765 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 1bab0c28fd86feed5dd9dae417ab6d80b47039e69a68f7ed712678295aba6d8f 2 @@ -12460 +12460 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 7d7d03991c064b5bc57808007f90d8a1ec570cbbfea5976e69fb2deb918d7890 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html e1697da1d5321bf765af8db66ef9a06a6cd4c8bfe131aabe8a9fecefeaf7a9d1 2 @@ -12463 +12463 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 29cc9b416a9183eec6b67c268bc80ed52924085b771b81ed1f64a94e4464122d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 8b6dda83523c71c75f1fd6ff078b7cf31ae9c423ea3c049bc98e9d52fad9a355 2 @@ -12469 +12469 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductPolynomialsBubbles.html 8afd2ea3d0d9d8d1d04eb36c264f30fa43e837a4e511f073cf0724c66083baeb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductPolynomialsBubbles.html bbe7c006403d87926472497caf271f3bca66ec31b8dec79f7217cd695e73476f 2 @@ -12540 +12540 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html 51f95560655ca4eb62082dc42b1839492043861c1f0941adc3d2c748fca4c647 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html f377912b9ebd66c471d24f3ad2f4600fa6472de46922bd06b827e19ae6fa6854 2 @@ -12545 +12545 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html aac772518ead946fb0978fa2d0315e4587f00bfa6548ad3952e2670dcda7a2e4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 7addc4727ad85feea4640fb5ff0bafb540172f3bb0f9b58109fa3ce2f7b03e8b 2 @@ -12551 +12551 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html 05bbfbce13d3bb3cac2c484c44e376375043c7080fd870435c52a7f08612752e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html cd17b94bc46ec9239dafca158eff6c9824a6cdf1774b8aca1b2d7f796b604c6e 2 @@ -12556 +12556 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 8abf74424a044d367effb54eee0c4873cf09e22cf5e8269ca2ef099c4fe714b5 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 85b87355ce9eecabceb084eb0f45371eb95e7baa0803f81a52e91ca5c6c2b011 2 @@ -12559 +12559 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 8902fb42050e33cb2169304174148c3e9f70ea32e459e1b10afa6d29c950a11e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 20f44b2f19e51b38d1415a179793c138c1f410671bb04caa82aa4bb9b6667126 2 @@ -12571 +12571 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 491d2a547e0beb85b7dd27d40ca8543e26a5d3ee5b5150031932e91603c114f1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html f7b7fc7b5b19a14a0847f84fcd3b14b78aefe360845cca40b00e2108fc14a7a2 2 @@ -12578 +12578 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html b9c18c9f4cbe94c9c71b05fae9406e799809bbf2b823f234a361b8dfb14ab64e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html ed61ad2738b2e397d0b8fdf87cd73e80eb1c23b562ce382832b48ef989616ed6 2 @@ -12584 +12584 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 6b5132ce03bee843c63c347bc50997a5640d15e898e7ffebc3e46d89055d316d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html e3ad24afbb37c326c3f928184366bffeec23694e48ad113a50006732f8706d63 2 @@ -12587 +12587 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 5999d4d007bafc69dbeea19dd7081dd6a3ba7e2c7407ad785d41743ac39f24ab 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 44bb2f3e936913576edf14d4d369f9ef9c68d654ab5b71749c947b12e54a22b0 2 @@ -12590 +12590 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html e4175d8063aa0f8e7133598172a8530834cd287e25771d945e2b199f8a9d1173 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 8f946dee3c3d103a515ca300894d241c17dae0e611d9a0e2ea79f2cb7e928327 2 @@ -12658 +12658 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 3a554bad03e9efceaf47ac0f9498a0f7c572233b1c01a5a74de160931770cd09 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 1542b3382914419eacfd1697289b777fb2f4e0bbfde536c03c5f0ad46a118922 2 @@ -12677 +12677 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html a39b7abbed092e4d70bcc9b830aa19ba14a68d64c569912d2b9cca2e17187f4e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html e456c9b3b2582e968a69a4eca678ed24af84b28d79420e0f685cef4e626ffdd6 2 @@ -12726 +12726 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 89bd339f98853285eb0a8e9d2b84e2a9c56ea81f6d537340a2c621c2d5c51e35 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html a98f805e3ab09a7071be1db5bff532b918caa5f0ddae529ea824d1e519978df9 2 @@ -12729 +12729 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 7ad081fc26956a342f0b523cffdd356e8d7bcd562df2c975fc04c0552f143930 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html a4a21f40bb7939f50bfcac13c5a738cebf07c96cbbc11cd8db4b36e58d1cf48a 2 @@ -12741 
+12741 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 818f6fe5f639a96c6ee1052406ce861b8809c820e02e02c78aba5cd16034a1a5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 5d4818ef9401d87664487284b52e2685ba8c3cf388d8f26d93a8b022da0a6d5a 2 @@ -12743 +12743 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classVectorFunctionFromScalarFunctionObject.html fca3bc5397c678b34cdbe6327516e701b83cd264bd97f9972b177a13fbd1d6e3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classVectorFunctionFromScalarFunctionObject.html 3451efddff2b7bbc3008ba6831d3cd69eaf5229abe3fc5dc87c9848c5dd74dbd 2 @@ -12810 +12810 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 8adbc326b8a16ccd925c40f62eb6c36b048d7376fd9f63c6f54880f88d68ec68 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 255ea33900ede4e6b3c79a9b0486610332076fdc82277013f8064dea6adbf498 2 @@ -12932 +12932 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html efb6eb07df9f07040034d88c126f09957bea718f63b3609e4a56975eba2dc3fb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html fb653d56ff7ff22a7bb15d6ab8b67294526e46af1899d38eb5c46e1d1519de03 2 @@ -12983 +12983 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1TriangulationImplementation_1_1TriaLevel.html fb371041558bbeafcaceb8f0a15e71d12cadaa66aaa7382f906ed974dc185c8d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1TriangulationImplementation_1_1TriaLevel.html 0653119451597a6b81665ff0f82f6ea04630fad2ef948a4c015d820a687f7dd9 2 @@ -12987 +12987 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1TriangulationImplementation_1_1TriaObjectsOrientations.html 14bd367f8b00d91be37195e3ad4c04a7e0af7973deca91172ae51e8efdd65774 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1TriangulationImplementation_1_1TriaObjectsOrientations.html 4d283eff261cc9f3b9e0f9839237910bc633f393b8bf5d879fc2e101719c087d 2 @@ -13005 +13005 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 6f27d32626e78fd03ee287c4fd39963c914074571ce3f760c5c02abe66bbd577 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 7cf337d165eae4f42aa94bdadbea36ae86c3dfd4a21e4277f1a73979cbb94c20 2 @@ -13010 +13010 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html dc46931c17c247c6a278d025068dfe123e9ec42ea3875619f478024d5da437b5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 3fb2940f87c46407abb9ae5f88f3001f270046d5b7a9bb23e4e60e3e56eba305 2 @@ -13023 +13023 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html bccd25e6408ed4864079df08ee0da331daa243dac5041abf4084bad72d66e4ba 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 2627acd8b9e29ae7a801642ebed59214025b01ec83079d0a3b7957e42d101fb5 2 @@ -13025 +13025 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 32c1e3f2d1126f03359ec0963f6af48f04b5cbcb992b1dbcd4ff9affd00fcafe 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html b0e626aadc2fe4c4166df1434fc9f6f217cc219039e249510c9da73f8156cda6 2 @@ 
-13032 +13032 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 146a049ac79863809893cee87395a5b34d3f4f7d8e60c90a760fa578b477c05a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html f41f4a9b5890ba23179a9e6a1034534543b32237f10cfa61a0a7d9e8beb0fa39 2 @@ -13037 +13037 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html d760d1f241d00dfd9ed94a4ee9ebb27770bb5e36932fa5c27bb3c5a2f8f642f6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 1e0b528377120a86f68f4648c76464cb8248db374c24b6be339db14fe82e06d6 2 @@ -13213 +13213 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 50fbfe23e50d100c5003a9a76766c2f9dea0b2d9375f7ef63bb98d444f97c74a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 9014639ad20a5fe9a555e7d19a31a3af1b37855e550e89abc85e9aac442ca253 2 @@ -13218 +13218 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html a08f46e73a568a2ae93fa3d821c5e6465c803c59876caf5c45b94ecab99af7b1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 584a2df8d714a4995e25d1574e5b717d581e98113f60ddd7c8488c511894818c 2 @@ -13681 +13681 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html 66c638b2fd7e0a863ace5b9d8c94ca0c045648e20ecc7b81718e297c36dd70e0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html a87a6ea7fdefac7d048f853d4baba7a53b5da9829a16c0cc594da9676033d31e 2 @@ -14351 +14351 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/grid__tools_8h_source.html 164582b635f4f06f12dd4b57e9cb22fba441098102ea8f89ba68904e8efb5ed4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/grid__tools_8h_source.html 47f601766695ca7452c5af37ad5daaa5b6ef332fa44538972073c50c5733590e 2 @@ -14366 +14366 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 47e4e103b4449352634293bc2cfe6acc59f3d6bdf6253210ea21a0595115782b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html c89a7abcd95f68b857cf43849a4abb6c5f6d9d1e6523bf93df30406cd99470ab 2 @@ -14369 +14369 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 2cca334bddf268b8f62e60d4f0cef91f744f84f7180a43073e52e14e6f96cc28 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 0db63910a29b8fbf33df55ac9b131254e95897cabddb7338410c888a8e8a7055 2 @@ -14371 +14371 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__Exceptions.html 67a67de01b198bad16e249d6e27560a43591edbc74726929a1ae0c57f5dfd187 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__Exceptions.html c4a5664e41f3d85d4d8fe30c4e9e309555c9d910e262877c26f0af25ad80a77d 2 @@ -14383 +14383 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html b79254d06b5bf9acb84034bf1fef0efb837e4128c0d0d34a1df94200d6773db3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html fdd8fa21e36cf931facba2cc44ad52746e1327a79bdd0c1c9c1bcb35abec0d97 2 @@ -14411 +14411 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 61a24342e263df221a15f94207a276d9b5e22f6a6b95d4e979d36dbaf8617daf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html b5dc8102a277089c67aa18cc7d7166bac0c90990504b44fe8ccc622bf9010af1 2 @@ -14416 +14416 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 6597c4060f340dd52de7e39cf973adf5b77967232f358264890c7635b8e6c3b6 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 5535a54a323a9a51f44c1fd3adb925a2234ab055bd9dadb308d5fa57982dba69 2 @@ -14418 +14418 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html f1b312168b5fe15358c0a2edbd6203fb5345c32547afeca5b1be37b535f8cee2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 093b8bfc6e050280fcdc92615d2b4b6a94d07a6f6f311302f1a39ebc5c89c060 2 @@ -14428 +14428 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__feaccess.html 1ecd81980890b42f3a4e15c706ba88b39aa44824863a81caaef6e4f1a1780f99 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__feaccess.html 1f587589da5a12586afebe8112cbca43d96f0b024ba329a0f8d9dfd068fcd95b 2 @@ -14442 +14442 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 933e985c61ef84af1ff0ce73eabe5c7c96854d47aadfd899eef9f3d774f8cdae 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 1f72f1cf397bdb97fc4cfedfd88410678843fc69d06b4a4f041e1407e3b1df80 2 @@ -14446 +14446 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__manifold.html 6f7c5894dafbd0a38e3e2432ef9d1a768134ded07a504c66f10499bf111f0407 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__manifold.html 7d78481e8e7587e5c2e9d041214cbfe828e84c73640202b3c4de9d6acd2cd3a3 2 @@ -14448,2 +14448,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 940bbd1eaba1dbe5ebc53936fec97ffa4662db95a449534016d3beabbbbfe403 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/group__matrixfree.html 465894e6beff1c50754330cb426640e1bd1c62ff2d80b26b38fcf1ac836f1062 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html e9beaed29ba53587348f65ed7e884ea8efb39cc8554d0addaf04b285a911a2fd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__matrixfree.html eaead8435fe14a2522cb22b11e7dcf08ed04ba7c1e3d05333029e8639843ba67 2 @@ -14459 +14459 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 3c67b8ec74ab19d7448420abcb29b53622278d0984ba7b39ffe446b6ea227c5d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html d1c1d718f4a88573815abbebc9aecbbb752ec5575c6f9559b7cd3d67a6102d27 2 @@ -14464 +14464 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html ce05ee1f37a61a9bd405807588eacd20d5ab324206e0336e4bfd524f3d303b48 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html abadb726f5de5c5c0598274e89a7c59a980f3c8df0027be12af8930ab36c031d 2 @@ -14468 +14468 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html 237e8b4066d1784087eaa98a35b858b17cfff8f553772f8836c2a95ba309b0dd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html 0a305245f37f4f26bf1984376a696b7b146a9242007bc7e0db9fcd945554fbe8 2 @@ -14561 +14561 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/index.html c338cd54f7f156f1dbc7e48bab35d21cdfd46a1ca5a9bc64c350b3352372f136 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/index.html a1069dd00c28558b1ccd41777a623434f791e8b1e0fb028d18adb4c14639308f 2 @@ -14564 +14564 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html cbabce40553f8024773216bf4464a5de7a80aa8c1ccb22b128121c89f6f3abc5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html f7b3700a69ecfb2da0dd19de5ef67e0b4825a1366db40c0770e023a25e86199a 2 @@ -14899,2 +14899,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 6fda94bf347cc22e2c987802fe46d22d5f232b0432589a182ee5d90d92b1524b 2 
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 5b424ba8b707c0cb0636dcae42e5540ab5b728521995c1806006b5f712962cbb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html dfa4f9efb5b3851c6e81b91e76f94f0f2ad7720aa6a2550013a98c353109c4f0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html d08ab0985806590c0a2ad9db8f6258240e1d3848bc149a7d17c0cb15df839542 2 @@ -14915,2 +14915,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 63931172adc7527ed19b43de4d5f8d12d4a8dcebdd3a90d94af89904df90f974 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html 921c1bf01fb7a689b507377d8f7fe57846c962c44838cca2890d73f15c7539f8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 1f655b55e48810d2727287413654162997f4ec9210b0349ff1bc50ab911bd718 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html f672614fdc1adb21bd367cfd9def8201f30dc37777eb44797acf17a5e8cb125f 2 @@ -14920 +14920 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html 52c7cf4baf3535d48c883356c256d65164a1b990371b8f261a175efc1d20844e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html 2be08b2fa3e2edbc41bbbf6798050af4f23ba1466c8213e9b1b4567472bddf49 2 @@ -14926 +14926 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 7cf99e9cd036c6281b7498c6fd92a80d71df5b5297ec128fccd2b6ae8f771504 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 4e62c4329b6414a3be08dfc2853c25bd0ba6f7b9c9ddb3c997e55ef3ad86b6bb 2 @@ -14931 +14931 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html 0ca226212b1a76003eaa93d9bc98fdef7557b7fbcf184dabaa6d06ac248b69a2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html d799e3df85b4a8e512913b614a1a2bfb98c697e481e9308f18dffe9d33081171 2 @@ -14936 +14936 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html ca482edbc9e88fa83d518bd37b245a9000d6cde56d50cf1500617a5a19fc06d7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 2ae39b275e7ac9d6e5bc35f86fe862e68615d567bbdb8843b76f6189497dce4a 2 @@ -14944,3 +14944,3 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 70d03649452363c5df364c8872f8f90554d75fb2c7a23c48c516e7076b7182f1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 71e6a0e6c8eb4672aa0aa1ef19199e19da261497f23ed6df19d4f9857c23f108 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html beae6ef5524a06d442c316453e82dc2d7baddf701cc28dc7ccc6af4cd8911da9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 4fb880a4eb6b2cf6f9cb534e168f573d8a3808d42c3779b4f249f398685efae3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html c9b5d884d88501d4a78cdb70912810c8898fac9c19c527cc027dbd51cdd0c3f7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html 2ab80bc9b2db201c70b30b2bbaed7d741dea10537068d4a03ce5bc6e01199415 2 @@ -14952 +14952 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 81725c60b31506f6149f860e91d87201559d8fc61106d2593c3f7620abe52349 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 6128467bcffc06de7b8a8ee07b311fe8c91fb513e5d54fbe21c8a14bb4431c27 2 @@ 
-14957 +14957 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGeometricUtilities_1_1Coordinates.html 43119b64fa9a3765f43a74d525e41cc8f24ca6a879757085723247418e5ec42d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGeometricUtilities_1_1Coordinates.html 5ffac243224f87a519a14b00becd07363e7e5e90a429677c41f7fd011f560f01 2 @@ -14960 +14960 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html e03adbcb4fe4d839445a47ffb11f1f1ee1ba4a139149ffa9fb6b631c50069f7a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html 3df6561f76f472e1a54c9860693111ac77bb5825b6a62bc3b87ef536e44ef04c 2 @@ -14962 +14962 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 76a6d29e61b1ed44fe469c2d1c74d1e3ac72cfaa2c9f632c2ede85a3b9e750d4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html ff9e142ab5a298ea4253cea8d4f8d921d813cfacada531a36abacd279af86244 2 @@ -14965,2 +14965,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 352190e962dbf1e694b9e93bb22bef5a1bede415c0bc188c626318c8d32b3669 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 57f6a09d32832aece457a9237127661e02114ab94a2681f537ff4cd4586d2c82 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 6368c6c20e30d51f7ed02a5a24050275fef155b39c86a7e0b2ddedb101b131ae 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 7c26dcd7bd5945b74e286973698c0ffd28fd203393376304a648330639bafa5b 2 @@ -14989,8 +14989,8 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html af7b3f074fa10f5b8c62b0fa07375a2490fedb86b18068a4045abab927bd05ec 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 08a16959f213bf68873a6d76a8326cc1cf205d3222da09a8b09339b9541c4213 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 9ff1cc5159708f743f88c1e97b1c6d432aedf930d0816c4e3666fbcae6e300ac 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 685837bde816228721da790afca1738d6b5c8e05b3e9c1696587258108584c87 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html d5b712e4e40fd0bac4c95c0481c29452b0f8cecc760e1bf6ba78a582504fb7f4 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 22965c28188ceba811a45f0b6a6100714cbedcf3b415600dd8ebba7138d1336d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 8a0631cc7108787ada2b08fee391a030405e476c4a29c1cd993720a4486f5f05 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html ba77bc930db685af7d05cfe64b9435ce2b5068d34369a7918b79f5d046805f0c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 33e2e7c4d16b0b01aa1c6536e948af651b3a41d892946279889f5cef00aaca9b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 9f3e0db11fa9452d1a16e3bac573c120f4b0309b1c82bdcb445dc1c78aeeef6e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html ca8d5f2b91985f783003af7deddd0b859cc238550fd9d90c6e41d0c5e28ff3a6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 5fe8c7a4197ddcd59f546a868365fb305917e80fe6e86c8e5bbd6f9deeaa5e60 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 
5fbd6347a6b8c3bf7be3d2e0418dce3df31f78e4a658a10f41a6fc5d3a0e2999 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html f1c59ad7653bcf97e46d32b062a87be5b00ff2b65d1178ba73d523c825112b8a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html db202d8e145acce946f36500f151701a98dea8df972551ec07b68b4fc493c97c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 3ab75d380f8773e103e3cf526e7e98bf5159447f2b60267df943b907c867c095 2 @@ -15016 +15016 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 4a87f46f82cc0012359e2157b5fe88e5d98e1ff22c7735e9690ee1b38668979d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html c766d840e917fe2f8e46dd323fa00ef4e1cef5fdb8e9b08965d54e8096ab963a 2 @@ -15020,2 +15020,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 787c19b216481001deeb660e5588ed1809366adb756aea82d5f498ec92dddfa4 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html a7a1dad00e22024c6793ea398f717bd270a6c31fbc3025357ee52f7e1739d8f5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 9230accaedf38e5cf0f7ac2a3569a27d7dac557684c715f23fe84e7c40b12ebf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html f4dd5aa81c1e3cea1d4ac5e505b1180c557d9951c31bd8977c2fbfb7eaad2521 2 @@ -15029 +15029 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 89e75469e4ed48595cebae49a746e792bc2fd77fbe7cf07614efeae8c198e6ea 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 29b355675d56e1b1e4754c6932b9c61d617de4171f7aebc75a421915c5047f7f 2 @@ -15039,7 +15039,7 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 33e1f3e27bf020f87f39946973a90260f4cb4f44a248947a430a2787f271e47f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html 3512e0a9d0740dab0147319343b4c2e44bb999f08177ef99d2b87fd3fc03cf5c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 84cb9775f95f0e827ac6d9da8335c08a38a5653e940eb12d3c375e5b699e8dd9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 7e32e39f85c3d493f6cdfa3e16cc9f88adcb6cbd88aaec8763123223c5a0ee4b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html d36f96b4c0d4c3d1bc76f821aff5589464fdbc505b2d81fb38539202a2a07f2f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html d5e2e4cf2a6230b6fb0fa34389de6444038ee16567d3c63c147c0cf37eb22f31 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 1285429554c09a9970e4f9e9e58f4a914282fc3725f3e760029277b8159c13bf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 3724ec9e9362e38b2446e0ccf6bcd6b89c552403e4627d77ea3aa1508aba6344 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html a36b8665158efe66b6f927c2e8610c7b64614b10bb728ad62d6fcbb180594a3c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 2f14413ae1a5ecffe12f6f6e1e425cbd50cbd31dc8ad12d95940bb759ac67eaa 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html fdac09aa9f25228985f6ab2fd225162126af0a4c048241e37b48a2eb821cf008 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 0252dcd979df8e5b39258f56c255a474add8929a6f28f07a0cfd2cdcbcea4a46 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html a819d3a08a753669e038342063607b8ce868eb1e9585c7a5a9e63d59cf0aa605 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 61d6ad379a2d0bbc9cc3463a4f7f66a854b3cc7b6cbadfc3e5434adba3c8c094 2 @@ -15053,2 +15053,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html 7472179f7c18ac4d9ea8c7e5f9c2d54998b7ae1adae54bf2577eaafc08aa460e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html 01ba175258e52234c5487c1b39f216b88aaf7b2d402e692a728ac463c661ad59 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html c50d96aa59db576898c8c05ba1984c056eadfeac6a203a0aa21c102aaae1e4eb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html e0481e41b256f975a41a778d29f53f26a5615dc2d9558324af23f67fa0f624fb 2 @@ -15058,2 +15058,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 4a19e30e22dceaed153cef1123a04e466d17270d04c711ad68021dfca6cb0537 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 6b5d6829fc06382c0b2f9f84a9a8aeed556665fbb218e4f828a09dd3bdd11d82 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 9eb4448576d242bc92b18e2bd08ec687def7bd8e03f0404837532080dc0f8ab1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html a8b40f0cc1e1fb6fbb5be79e00c3a79caace05feab0ab243a4e82b41fa265906 2 @@ -15061 +15061 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 3c581f2d0d22951b9c48f708fd6c6425d045b17edd93a4fdec4e42996cfd5dec 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html ec50d951dac2be06cf3d17dd0a2a889b0cf807f9afca673dabfe7a7559d4f7ee 2 @@ -15066 +15066 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html df32497735a04071c5a5b4dce6b3d3233fefc6bef4c9212dca0e7af9b8091d6b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 71abdbdbf61fe6d60d41aa711555c311b857fd8d157d741e4fdae4693703c4af 2 @@ -15089 +15089 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 3eb2c7f313b4825bc18d5bbbf42483288ae98b3e1ab9fc3e26e5d396220657d0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 1284128b98e9b3a6ee9b3af25a2323f7e36ff4cf395aab45546d2130648be276 2 @@ -15091 +15091 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 28d376398af1b400d44552ed404e7a75cd4ccd06a0ea2a958a17f3f463b0ace2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html a24f85f235223e4ef851c5deda07552d99c8c830f428244ccc79c48c09b0e4fe 2 @@ -15102 +15102 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 583de2d8c86579133ec4b0349702675ae24913ba70049919a45de0edce215c8f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html b0bca7bebc11d4cfd2797cf848522e2e38a664c78004437fee149aa5e00f8c20 2 @@ -15122,2 +15122,2 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html 4ae5e00154925defcbe3c942e302396583f1bb785362e90e09696739052ee411 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 0c06d4c2f3560214f441e5bb2d6eca6f5d52774129d106c2fd2c394e3ed6c0a9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html d01588fc080821a6c7962bbce5e154a0b370090a2f9bccc6a3ada053a15bcf23 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 43a61f57ba06979e0ebb282d1f6a8429804a0283023da8051cfd14f77252a957 2 @@ -15257 +15257 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 890bf304fd19e192c8c44c1b5fe99d374aa4061c3df49e587b95674f80f9d70f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html fe28dd45a22e8a909f5d0b3fbaccf0e7a02417a073806b0477b2347ac4489074 2 @@ -16330,4 +16330,4 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 33913046f13e051038b19f6ad451e2e52880fdd072aa338e10172faeb25b10b1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 9cfa27b8fca3f26ff7495c5563864381028302764cc8a9b53a3d49f97f51ecd9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html f184478309f0c7e433a77d38495ea425dd81c6bc9b7d8325b12fda77c4399f97 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 7a5fb6f666754d4c84f92109c270df091e3d06de266a395ca00498ea13f26a1f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html c87cd1c36ad2d5905c0c2d9a7f691870ccf29ab9b5db1a993d0887c3ff5d41fd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html b65aec03ee427de8cb4c4a3d43be29146a2635773e5ff5524f5ec0cae3c65811 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html ae81aceea6feacd8445ac9641ec1d53b1e2a5f02be1d332413f322247a6c08b1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 3091f2619aad5d50db86a1d860fca252b9c926d95fb7562e21fb8d435987ff20 2 @@ -16336,3 +16336,3 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 91f80a40376b8537041243a5abc9eb4e24ed4c1d4d4007a4b9fa4b514f92686c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 25b78310c9fd7c5d87a693bcd432f7da49a6c71c01ef4ebeaa71962169a65f82 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 4e7a0101be28f3b094f61ee87f0b0e49e3322371ebaae4e2c35ce8fb8683f635 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 985ad48bec533ce5a393b552e040a4fdf1cfb514905287ca0f054bd2bee4f395 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 2c9a9a5d10da44a88b04306b27e8e70208e0b3171fa6a14afec0e90e477b1532 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 9a0b7e2cd663ec73677162cd43f9357c2fea4f8f566ce338640c01e935bd25c8 2 @@ -16341,40 +16341,40 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html fe613a328c9f087cad107499e81980476ca1b6559fef0b7ff7df8f675c1eb339 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 32a51e8f088f649122393ffb8eb68fc75bee65bbdd02bc1b9930e07bbd5a1d33 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html e24af121a55fd41cb6880308bc0987dadbf5039168515e5c39d9bc47a7dbdd8b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 77459950b04b214389af79d43251a71ce2ed4eda1b0678086a5b62fc23d137f1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html 94d5f03342dd605059a4e8743dfeed68fcfd7b7156d219c1dced9afde3a03bd5 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html a243933b149110940797858c327d86989775d7670e0bae21525eded71c2a82ed 2 
-/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html d620bb5540c06a14168604ba0d86dba6fc672c262469b3f3915284e76fe07e82 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html d326a6fb5aba72b2b403d7649fb4be5b48abe5240d2c59f9ef82b12f8a1b0aba 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html f1e0057ff89ad82db151caa09bbd7c3abfd432f0e240f74e0b30ee8cdba2ef2b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html 2c9f35bc8d7a1a59660f3628ade8138a79e2c13ede811be8f0cd5f4d1d1ce6a2 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html c9f2ee13a8afc984c562dce31edfe62a16247efb7afaf89a433adaa8626cade1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html 80591723d3c34a33c736016360d8f6183f6aac755c233bf3b7a5f53642359ab5 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html ba55270d1de3bbb6218412b612ef8036957c742e2d13fc311cde9489a507ff28 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 4e942eb05317cca9cea1420a8a60e6bf75bb48f5fa3f5c9749c83fb10195b27e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 22a799147845d2b34c1924eae55acb3d6abfa4ce30a6b2e9c342c209ad58da92 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html 56555c573fcf06ade5faf9d5d061705774f4e728ac6bb296565ed3b6235e6aa7 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html 7a77673ed1b2af9a69f189668df923f15cf71a456519be5002f8a6ffc97c7edd 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html 122e6bb6f6fe97edb968a5ced48a4db084bb1c252173d728a2ee28ef00e8386c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html f01014f092242278c11f2862eefdd43f04db59567852059e8dbce3ef953241c4 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html c2f3eaec105d19dbb205245c6266a53acdaf73e4471ac857b9fb4a9606b357a3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html afa9ca3934f37655593fb29893b8c767b537244f13078b18cd9fb2379f2df6c2 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html 47d4000e6d02ef94e376ed5dee65cd36df9479ba5d4c96c9da8b7ba96b3e21d6 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_38.html 671d2830dc05ba3b9e5ca0759152809c7454942eb70910c70901459e96a9ed53 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_39.html a67bddf36cb701b087dabd01b8d450dbfe963a4196f7509138f0c5e2353c16ef 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html 376cd1c1e55a29248e54723db6a999c3fbb471e3cc5ba515e9b8c0ceaad33d6c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html 1f194eaa660c732184f697ef072438971ab4bc86f4070119c15bd4ec78dc2264 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html 4a3afb311efed45a463a8d546e58a3012509a48983c9332d8b1d2ed0dc021f52 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html 02e2fc4ad7fc8f1945a0963eaa1e7bd04ea79e0382c77f755bb9d6b93bf74e4c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html a02423bce8eecc5fa217541c5afc31149c2666c1b5fe46d763823e8196588839 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html 82e5877c8e1d825ab9ef1e02c6154f6f02f8a3f882e49b614b530f870f1380ab 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html 2dc6513618f1dfea0dd69711510ad4f8b6b2bcce7c7894acb8a5de28e698a054 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 906971239fd8d1dd81e3c1402182c6eb15d9473d31e33b6b84569d73af5ea615 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html 3d22148dc2bbb8a9fe9735fd20f9fb295e2b111caffdf7a913963e0525d1c83f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html 
a628beb6dfa335fefe607203d6a9804b528ad7e7cec625ed88e500550f2a2af3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html 766fa46098f2d3d2f4c89f8281b83be28c97a9b4ddbdd2136e285e765cf6aff7 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html 3491ce91b68315c9cdd708a54cb885dfa40f74e854cddbf5a57fc461736415d9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html 961834c0667a1c22c9c29c8c17141ba14d2bd0491d0611e591f9244b9379ba9d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html 18ea5e2df4948093c567483f579edc9ff316bf7c1e1e806e34fe1241e1e1520c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html 3ba6855676a214fd094f1bb96a2dad7e5d2d1221dd02a5230fb165147f5ae112 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 3350890cd34c9a7b820bdf715f39429a027641269939fbebef7ac6447a9fbf1d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html c106d7c79a2646898080637d6e7d62d9dbb96bc1a8012dd06b967e54532f054c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html e485fcdee498919e503288db83dd42e4c418605a3b15deb00880cb178bc49621 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html fb01f5506837ba02900a23abd332e26b115962a2a3dc0e97cdb37f4ffc7f902a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 84109b84d204a42dab0ae068c0fb3050337813cd909431536d8f36edbe9c3861 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html fe3c501fd0f8ef303080190f5edc9444d8f0723ec41a56ca14458b0dd6e316c7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html 125b9fc26404f6258892cf6c9535809de5cf64cc03cd38d7fee33df51093fb30 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html f261b0612a30580cd5a35c60e9a4e14c17fa5fb815b90ef9d2e00fe97eb7c5dd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html 412b978714c473fbb8ee26988c229ff04198d3f1a344f30b866c91fba036dc59 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html 2e4d4a9c059b254e6c264026cbf15fe6cb9280022b446c609f326ae03e17206e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html a2a7165034bc10e68eebf1261e9f8dfa8ec6e15945420c15056a686d0229a2ce 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html babd67ddc3599346483e4982dfbb57ae207ceab788de3329de42eb561dbb2292 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html eb5fc560bc25fbe889bd38f8bc7412ba4e92fe9fdeebbc9e468dbc2ea1a0725d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html f80d42ac62ce8a670939b91955e2539e8589bae9f835a028cfc9c535364485a9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 02dbf1ef07080a7458e6179596cb0bb5783b3e94eaf7e313a614ef8d6564ea4d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html eecfd6ec1b1d6c216572285cdbe7a769ccc6d7fa7a7d606d816ae3da0ffdf426 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html d6ccf7f3202beaafa13b38bbd3d1c415e12915b15dbacf3b79eef84ea18a96b3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html f8c791fe08394168e650fab8f2433ea1859ee144a119423ecf6ee2f5f6b0d787 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html 44eec43779398390ddfd53a1da295a4b2e1193d2d1af16b4e524db3d51bdbf14 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html 386650fe72dc48dd3dd15328d4bcdb7328b57c19dcad1a78df25cbd4eaab0f6d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html 10942d587d7ef4bb06611f9a2618477b0ae224e00045460f7b0ecbe5f57ab12b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html 680ac711cf0d29d36d179add34428ce269c680ad4b4dcdde9270641a8c1d9edd 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html 9f75c743bbbe90ed3486686f53d69b3885f5c802e73abfc2b321f2a6e774f0ae 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_38.html a7dc69917784b29cc3ad15c3b1384f170c08e1c199a7f4912014403d2079586d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_39.html 93e3fad41312b68d1a3c5d6714e4053b2fdf2d00f3361c709a8a0ae31844822c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html 684d20e1c6e9fe30a248cfe6506cb9d2a7ec83e9a632494ed7efebab8737e34b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html fd0b1c15a123c7ac7479a4071e4f6c382eeab9769f1318bcebb2bca682b0f8b0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html d719128c4d4ca193bb8ed01d0b4759419f1ff8a8327a3675052ed20ebd6cf4db 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html 8352c6e93f284f7f71a51a8f887c35ee513f09d8d5ea1b12b29cf0b759070cb7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html 19a0507a01ae89b9e05b31b6dcf2191b56b724ea40518fbce1e4cafafe7c74a6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html ca103b2329ea4f211034d058c17e289a1eabe79f5f220f7e66cd27d17bc66029 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html 87ef020733bf1d31be402ba75d9e882d0ab5fa5270aab47ae0af95391ed0431f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 581b5cc84e00d58893e99cd356aa5124c571f93bf36d9db4105c4adfb3bed5d6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html 218cbced1283aee10530dbf7a4c5a0b80c169b220662324aab26e22b63adf44a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html 13d4889dc3859149e304c617830f5f9c9ab321b041ffe12b8ddd5b4e611e30f2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html 14e6cfc7c42c6dff66130ef99ec23f17a0a21d21e1a0e42b3e5dab7494e0b224 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html b224844149abe0b5b89b0ae3714342f9067703c2cdbd6ea78dd0e2874aa15734 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html 3930e12927202477ca3d761e60be271e5d3c78960f8daeebad585d29612764b6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html c32184bd54f49c436d8450e5cf751405e563a0d19e124e73716722f46b694cd6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html 62fd01a3ffa06acd6e7a76e5d1fb5fd858b3751fe66c2e31fd496453694b3e7b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html b6b8e4ae99595edf11434342958520ef7e3ff797ba2279f84c54cd289958c93a 2 @@ -16382,31 +16382,31 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 9fce7bc6246e04441ecbb43e5d001016856b3e97856be181d098923263794f85 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 5507fa321c93df04c47d3c812251e8095918c040fd253a19d5381d6aa3d387c9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html c85da5da11e9d4ff172f6270f526559b9a95325e01970432aecfb0a8b02bb098 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 8052df39a824f34c82d45c30755793d9c20e0a75b994fca574167b8cffd027ca 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 5a1cc861525c6b050beebabed56709bff35e15962d964d48b715d23b6fa07a3a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html ecaeca026f07d6842f3fddafb68ce2e7f443a3dd5909ff2242d7d5930a50d175 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html c96c1bd4fc27b8ee2f09c1e2b0c530eca0f19ec0a70f4ee4b9438e008534117f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html 1bdb397e23038329d130ac753ba7e8d33562cca5dcb43d8e834004c93add8091 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html 
999c3e648f07af4de07ee67a8ddac8169e01f3f8bb4a217a80e8ea75459d59d5 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html ca80b7418df972eb28edac33244067d48c36ac7c3fc73d3bd994571fb79a1318 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html 6c0a1a95b23a301685bb62912399b083b6212fa41436a1c657e3b4f940282972 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html 40fd53407e3219000b751ccd35f1e15af0db69f085b6381d6a82702f4441a5c0 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html 99d1a70baa2d54af97eb4ea2612abdfba36e2dfd8824e25de4ad134218d78b1b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html 82284fd9a348e33331f8b806c7411fb3c245a7a4d8a684940b0fecab597d1740 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html 88b7e2a73bdd0a5065ef0d7d293ec3809093041b759f87a1db979eb9089ff692 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html bef2859bacb8ba34571b828e82778d7353b71ba58be7130a10dc391882b76f7a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html a77ed1c80fcc129be799974da9956c05f95b510ef2376b80a7669fbadbed6724 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_70.html 42228bbd5f3a20f3f7d1d8af76777bc84d40a4443b4be84cec73ea66c5564868 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html 6df813b00e3c689cc1d382d6d9c0717e9dcc40dd3dad5f1671ae1fc7dcae06aa 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html 10e4ef772641c16711d45aeb4c66bea7f71d5add8c022e9e5dba95705e662971 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html 0374435d72c5d20665e153509aaaf64a9bbdea9ab4753cfdd1650d64dabc4825 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html e9dcefbbb4ab5a380ebf0fcf3406d68722160e43f48bd888e57122353be0a952 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html 848b3b516b35f231c5038cd57bb293be10d534471a508585fc3acd1d0011e366 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html b412109d4a3015f016e2027eed840bf60c88820d5977f04b0ac19ecc973f3916 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html 03c6203395a46115598c6550e58098b1f22648ca83340499f0b80c28aed343d6 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html 1bdc078f3c0159770fbef3ef35fc7d22f7a363a545c15d153478ee53dcc30017 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html e08d7f52c0368368a1a4b3bb6575946d7d4b9832abb2f1076617b7ce05830a1b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html acf254e6c9d2e706cefed245b40c67880e463ce150c7900a771c8683c2d05c6d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html 6d9796275169e6ec0eda09fe37a195852c0223dbec2112fffc15cf37c6c2a540 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html eea93f9d2348fac037bfefeb3fd519a2db023135463063bd228ffc3f411558a9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html ba5667e68fc370a17917a6d43a6d6d14bbea4a1082db5a84bfd8bd11920e8939 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 01a8dd73c1f4da2abf5f21adefe042477cd4251df435ef04e6005a9a5fd8963e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 5c9ecc1a96282ea05d03b560dbfe7def8753079ca0c149e47361345562163a61 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html d056c5e71abb25efd27600bb26fb5109d9f276923c58f05ca5b9ea884dec85c4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html d990afcd6552257ba7d3c2d94355b1f22eb0dba484999c05ff9e5c86c4a83b13 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 59a78bc69f78311d1ec260691fe1a0b476508f44b6f9266c29ff893e5a53284e 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html a81666e1d12049f19be1f5d23f83824a783f234a943d6e5b2ecc8b950d78041c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 4944e8cbf945127c4acd5ba59ad15bf72f41e45e41e0878dc5af1177464a5a28 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html 18fcfdc1c417bfd61be152d428a4079b01f8e7d51c7058aea2522d3bd27d4189 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html 42a84357cad3df1708b36ce16c822ca9b4cc03fe70560318aad1228095f607af 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html 785aa1855ae6a990e394df2620e1a9ab1e45aba1ba209d53eb930e1c88710fc5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html 15f2f5ee78817e25cb55016bc35c2b056f7290da2ab3aaa563f0c5b581d311c4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html 9a4eca0e4fae5de5f2f2ad09d5611df3786e0976c530a8dbd7616a2e49bf117b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html 2f1eb659b3628152130af800f023e71596fb62c739096d5842da3848c7902c64 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html 3c0b1b761d01a5030c3a266143a627e7d41b2ff3114027fbf6299de4f6bd43a2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html 31a9a992c9c7dee1b9070f9a15421a762bb5b5c3697e9e454e7975f277cfb9e0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html a06d388d985a9331b64ca2c2e472520cd97945c8b228184020d3622de937619e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html 3b7669d35ae6457e878a7ab87d8f1b54f69c55217b78123d2de60da073b1720b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_70.html 39c4ab74c426473bcb2179ab7b1231484af144893fad35a5a3d84fa47f41fbf6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html 7f396b6dddcfd2f3506dc5bf77ed7caf16ab30eb0b7bbef6659c9e9b88c01fd9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html 147382cf3e6ebab20b9bae2d5f7dedb06ae77b988d157a67f549a1f58fce8998 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html 6c39bbfff65317f3d6646c4221f79eb55f0d176ee3bdd7f50cdf7482582bb534 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html ae3334f555aa2ff6a6f004e20c237aae02b523574d67f4ee16162fbea789e7b6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html b3cfc2b1c05cb4d22d4eacb77fb288944f137be8cb59e5584fcd3cb76d9c83ef 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html db7168f0e8d1302e1baa9aace2fc58456834ab8ce4003723a22548150d267a10 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html eddb790f183f30cd7e0378e56bbe261463b563381799645e3c34ce3c030ca3ba 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html 2859116cdd79927a71829ba099fa4b6e8dcfaf198a9683a8bf4143501734695d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html 14d86987d50b68f2a070d13daacec44a8daace6015b504591ce0c2cf1f4ce32f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html fcadb98ce964e9c41d4f4425bf7741fa74dbc01854da9252687cc87ca1f362b9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html 06367770e43b3fa255158ed23f3b507810635367d02fde4f248097ee750db37b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html da0331f5f0ac8bf58a99cdd577feaa7aae73821d39ac41b041d2810127bf9cac 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html 9f9682ba0605a3173c78531ab8b681e76d78cc2ce81e6353f1e007c1f931814b 2 @@ -16466 +16466 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structCellData.html b22e47a2a8159bd29c4d292726688848e890e43fe5ca76a93dc073ca74eb9b54 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structCellData.html 
8bb6375e8661f0031a3bb82f32659eaffa4bfb4e79ae2f18bae19bf41c6f5ec0 2 @@ -16471 +16471 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html cdcbe8611016c7991c3df21dafb71b3b7ffa3b41457b4413db7fa81cc18bd919 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html 3bdfe1f6c48b61ee36e0a355d17655362932da4a10261581474c442f46b0199b 2 @@ -16522 +16522 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html 4c74ea4d0b6c63c6460978c36d1b4a5330ad5c5b2c08e7c085286325ebf05448 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html 4928c6122c5f1b948e556807af29f3bebb215939bef31069368512f3a4788033 2 @@ -16528 +16528 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html 3d5ce0fd34fe9d6c8a97141770065904830c4a0b639381d3383da6ea03de74bd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html f0d1c096472bef8493659bb9deee084269233f60bd1ffb7714fc74c6f60aab62 2 @@ -16682 +16682 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1Scalar_1_1OutputType.html 1e172ba66f0b06578da06c749ba1c067e32bb7629dac3ac023aeda4e0b4a7d07 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1Scalar_1_1OutputType.html efd223e38235e0c4a351a1c1c7b718bb4be6a1e46b628d5dc66bc04529707d8d 2 @@ -16686 +16686 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4_1_1OutputType.html 57d932be6fcf51a792f0fdba4a41ef3266460e25dfa4e1d4e6c34e311996c7ea 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4_1_1OutputType.html aeef2b33a9363bf2b9a0e75bcaaa2d1f2563e6dc5efe8fc1a5f0f8cb25ba0f17 2 @@ -16690 +16690 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4_1_1OutputType.html 00b55e288b0539b39b34ed03584916d8e712d1f63ca7a8ede9d64fab81e1c858 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4_1_1OutputType.html dd93902e6703f6ebe1915f53397866174ea1c3ee355fd3862488dd696cc179c6 2 @@ -16694 +16694 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1Vector_1_1OutputType.html 692b16cdbc685656f1f1ee843c333a0d4b27db4c7898842051227b3b0a7250f9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structFEValuesViews_1_1Vector_1_1OutputType.html 8e0e98e95a0432bb5bb06a94c5c0ca3481b001730883fab967e5cb12b4770fc1 2 @@ -16704 +16704 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html 06cbb67ed17d152322039145da41781bbdd1de74a2d39b5d584454712da1a6e0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html 2c4cc55e46c1f9dcdde1d1c0aaaa8782780b0cba6d7148505f7d60977b80239b 2 @@ -16825 +16825 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html e07da5de4aa1591048142cc29e927f0c36f82ab0e84656806da64a8d61e19102 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html 08b8bc9422b78db25ce1057a5b88ea7c5dfa7b4b9404256f4a312e0def020028 2 @@ -16829 +16829 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html fcf9e937dc7afdf1898f63eceb0b201251bb7fd9c9c0cc5e7b6c684a4a2ddd28 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html 
3e293e6643155d7e55ddaf1816161ff7953e5fc7dfcee1e2dd7d63ca20847949 2 @@ -16833 +16833 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html 912334bcaa1d991ca4a33270628ddc2f07570c8baaf7e078525ac84f4085814c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html aafe027411e1bfad95d9d9dfd907ef3cb8b4f7247d291b8042df4fb25564da72 2 @@ -16974 +16974 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html b063172d7d20b6681638224cce5538415c3f6362fd8b851ac1ab08491609e503 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html 8a5127c05007a27b6ecfcd5b5d0fca8fd2e3657d367d1b9d1acde8ac542bd1b4 2 @@ -17005 +17005 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html 070d5fe5d0ebaea0d0ee4a4e2d74f18f0f10fbf517a3d39f862b8d2391345b05 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html 691fde1375635947f048b3a63f1bf410933a8f0a8bbe6c78dca5daf191fd4fec 2 @@ -17050 +17050 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html 42bf644f7ddab02aea8078b9d73c28f3a2eba0ca749b5bc73c30326e0bf66f86 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html 701f08433c083a03612ab584a08657ec255005f2aa2b3aace161dc174feda5f5 2 @@ -17318 +17318 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1FEEvaluationImplSelector.html 87e51fe4e4da4372262acbf00cfb948e82ba7364b68df786d0d26f6f44bc3ebe 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1FEEvaluationImplSelector.html 34fae846491b0b24115c34db169806ba3dd2e16cdcea52bc5a22ad5d108718fa 2 @@ -17415 +17415 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1MappingInfoStorage.html 92db71375a04e54361c6ef93c336f983326e6d91a588259445c1fe1bc5ac80c7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1MappingInfoStorage.html 804d256a2b9c7f601206106bd1d64e2e2e579c5ac7b581f714842787d950a124 2 @@ -17426 +17426 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1UnivariateShapeData.html 4f9d61857faf28ee07e383ace38e55328f7949faade5571cccab318b3873b69f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1UnivariateShapeData.html 25f596d948ca8e25c14fcc74ff5dad8fb26572dc19599052d22f54fd2b8f728e 2 @@ -17954 +17954 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html b6dba35ba19c1606efeefc5671e030cadcc7d5ef6fc61ed40969b72f87de0f1b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html 9146108d01f07f7f2f7bef1ca6eca5cf808bd23e62fb44cab60c555ea371643a 2 @@ -17958 +17958 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html b151df7495dd2f6ca2f908d6523571b5c9c8d86007296442bfae360fe6b94680 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html dda578d325a4090fe4d1025f7230971b0b902bba7510c3c008c98a9ef42f95c0 2 @@ -17988 +17988 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html 5126c06bc21cf744f688e6d0922060c4c92f7571b5773dbc0b1463a3acbcb105 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html 69d33f1306858919956cb4be629b155434c4f2530f8e6452346ec4a2033f6512 2 @@ -18050 +18050 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/todo.html 
92eaf8eb695eec6698c9d0fdda08c8eca4852c4f235eeba7a2a25bc65723d31d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/todo.html 7d7141a4b98276625b76cd1808c201dbbb1fde581d9247a38ad4648642fbfa4f 2
@@ -18269 +18269 @@
-/usr/share/doc/packages/dealii/doxygen/deal.tag 4a86a78faf46fe425936055ef8a524c2469e0955164ff9da0eff9bda5f80d7ac 2
+/usr/share/doc/packages/dealii/doxygen/deal.tag 27d9afd3fdc449ff1a1f09f931167a7a699b94e80c337a258aedcd6fa4af4181 2
comparing rpmtags
comparing RELEASE
comparing PROVIDES
comparing scripts
comparing filelist
comparing file checksum
creating rename script
RPM file checksum differs.
Extracting packages
/usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html	2024-01-30 03:04:27.308671930 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html	2024-01-30 03:04:27.308671930 +0000
@@ -103,7 +103,7 @@
Block (linear algebra)

It is often convenient to treat a matrix or vector as a collection of individual blocks. For example, in step-20 (and other tutorial programs), we want to consider the global linear system $Ax=b$ in the form

\begin{eqnarray*}
   \left(\begin{array}{cc}
     M & B^T \\ B & 0
   \end{array}\right)
   \left(\begin{array}{cc}
     U \\ P
   \end{array}\right)
   =
   \left(\begin{array}{cc}
     F \\ G
   \end{array}\right),
\end{eqnarray*}

where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B$ corresponds to the negative divergence operator, and $B^T$ is its transpose and corresponds to the negative gradient.

Using such a decomposition into blocks, one can then define preconditioners that are based on the individual operators that are present in a system of equations (for example the Schur complement, in the case of step-20), rather than the entire matrix. In essence, blocks are used to reflect the structure of a PDE system in linear algebra, in particular allowing for modular solvers for problems with multiple solution components. On the other hand, the matrix and right hand side vector can also be treated as a unit, which is convenient for example during assembly of the linear system when one may not want to make a distinction between the individual components, or for an outer Krylov space solver that doesn't care about the block structure (e.g. if only the preconditioner needs the block structure).

Splitting matrices and vectors into blocks is supported by the BlockSparseMatrix, BlockVector, and related classes. See the overview of the various linear algebra classes in the Linear algebra classes module. The objects present two interfaces: one that makes the object look like a matrix or vector with global indexing operations, and one that makes the object look like a collection of sub-blocks that can be individually addressed. Depending on context, one may wish to use one or the other interface.

Typically, one defines the sub-structure of a matrix or vector by grouping the degrees of freedom that make up groups of physical quantities (for example all velocities) into individual blocks of the linear system. This is defined in more detail below in the glossary entry on Block (finite element).
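
A minimal sketch of this grouping, modeled on the step-22 approach (the element layout and the variable names here are illustrative, not taken from this diff):

// Group the dim velocity components into block 0 and the pressure into
// block 1, then renumber DoFs so each block is contiguous.
FESystem<dim>   fe(FE_Q<dim>(2), dim, FE_Q<dim>(1), 1);
DoFHandler<dim> dof_handler(triangulation);
dof_handler.distribute_dofs(fe);

std::vector<unsigned int> block_component(dim + 1, 0);
block_component[dim] = 1;
DoFRenumbering::component_wise(dof_handler, block_component);

const std::vector<types::global_dof_index> dofs_per_block =
  DoFTools::count_dofs_per_fe_block(dof_handler, block_component);
BlockVector<double> solution(dofs_per_block);  // solution.block(0): velocities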

@@ -135,7 +135,7 @@
FE_Q<dim>(1), 1);

With the exception of the number of blocks, the two objects are the same for all practical purposes, however.
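
The diff preserves only the tail of one of the two constructor calls being compared; a sketch of the usual pair follows, where the grouping in the first construction is an assumption based on step-22-style usage:

// One block per base element group: all velocities together, pressure separate.
FESystem<dim> stokes_fe_2(FESystem<dim>(FE_Q<dim>(2), dim), 1,
                          FE_Q<dim>(1),                     1);
// One block per vector component: dim velocity blocks plus one pressure block.
FESystem<dim> stokes_fe_n(FE_Q<dim>(2), dim,
                          FE_Q<dim>(1), 1);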

Global degrees of freedom: While we have defined blocks above in terms of the vector components of a vector-valued solution function (or, equivalently, in terms of the vector-valued finite element space), every shape function of a finite element is part of one block or another. Consequently, we can partition all degrees of freedom defined on a DoFHandler into individual blocks. Since by default the DoFHandler class enumerates degrees of freedom in a more or less random way, you will first want to call the DoFRenumbering::component_wise function to make sure that all degrees of freedom that correspond to a single block are enumerated consecutively.

If you do this, you naturally partition matrices and vectors into blocks as well (see block (linear algebra)). In most cases, when you subdivide a matrix or vector into blocks, you do so by creating one block for each block defined by the finite element (i.e. in most practical cases the FESystem object). However, this need not be so: the DoFRenumbering::component_wise function allows grouping of several vector components or finite element blocks into the same logical block (see, for example, the step-22 or step-31 tutorial programs, as opposed to step-20). As a consequence, using this feature, we can achieve the same result, i.e. subdividing matrices into $2\times 2$ blocks and vectors into 2 blocks, for the second way of creating a Stokes element outlined above using an extra argument as we would have using the first way of creating the Stokes element with two blocks right away.

More information on this topic can be found in the documentation of FESystem, the Handling vector valued problems module and the tutorial programs referenced therein.

Selecting blocks: Many functions allow you to restrict their operation to certain vector components or blocks. For example, this is the case for the functions that interpolate boundary values: one may want to only interpolate the boundary values for the velocity block of a finite element field but not the pressure block. The way to do this is by passing a BlockMask argument to such functions, see the block mask entry of this glossary.
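
For instance, restricting boundary-value interpolation to the velocities might look like the following sketch; it uses the closely related ComponentMask, and dof_handler, fe, and constraints are assumed from the surrounding context:

// Interpolate zero boundary values for the velocity components only,
// leaving the pressure unconstrained.
const FEValuesExtractors::Vector velocities(0);
VectorTools::interpolate_boundary_values(dof_handler,
                                         0,  // boundary id
                                         Functions::ZeroFunction<dim>(dim + 1),
                                         constraints,
                                         fe.component_mask(velocities));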

@@ -164,14 +164,14 @@
Boundary form

For a dim-dimensional triangulation in dim-dimensional space, the boundary form is a vector defined on faces. It is the vector product of the images of the coordinate vectors on the surface of the unit cell. It is a vector normal to the surface, pointing outwards and having the length of the surface element.

A more general definition would be that (at least up to the length of this vector) it is exactly that vector that is necessary when considering integration by parts, i.e. equalities of the form $\int_\Omega \text{div} \vec \phi = -\int_{\partial\Omega} \vec n \cdot \vec \phi$. Using this definition then also explains what this vector should be in the case of domains (and corresponding triangulations) of dimension dim that are embedded in a space spacedim: in that case, the boundary form is still a vector defined on the faces of the triangulation; it is orthogonal to all tangent directions of the boundary and within the tangent plane of the domain. Note that this is compatible with the case dim==spacedim since there the tangent plane is the entire space ${\mathbb R}^\text{dim}$.

In either case, the length of the vector equals the determinant of the transformation of the reference face to the face of the current cell.

Boundary indicator

In a Triangulation object, every part of the boundary may be associated with a unique number (of type types::boundary_id) that is used to determine what kinds of boundary conditions are to be applied to a particular part of a boundary. The boundary is composed of the faces of the cells and, in 3d, the edges of these faces.

By default, all boundary indicators of a mesh are zero, unless you are reading from a mesh file that specifically sets them to something different, or unless you use one of the mesh generation functions in namespace GridGenerator that have a colorize option. A typical piece of code that sets the boundary indicator on part of the boundary to something else would look like this, here setting the boundary indicator to 42 for all faces located at $x=-1$:

for (auto &face : triangulation.active_face_iterators())
  if (face->at_boundary())
    if (face->center()[0] == -1)
      face->set_boundary_id (42);
@@ -240,7 +240,7 @@

Component

When considering systems of equations in which the solution is not just a single scalar function, we say that we have a vector system with a vector-valued solution. For example, the vector solution in the elasticity equation considered in step-8 is $u=(u_x,u_y,u_z)^T$ consisting of the displacements in each of the three coordinate directions. The solution then has three elements. Similarly, the 3d Stokes equation considered in step-22 has four elements: $u=(v_x,v_y,v_z,p)^T$. We call the elements of the vector-valued solution components in deal.II. To be well-posed, for the solution to have $n$ components, there need to be $n$ partial differential equations to describe them. This concept is discussed in great detail in the Handling vector valued problems module.

In finite element programs, one frequently wants to address individual elements (components) of this vector-valued solution, or sets of components. For example, we do this extensively in step-8, and a lot of documentation is also provided in the module on Handling vector valued problems. If you are thinking only in terms of the partial differential equation (not in terms of its discretization), then the concept of components is the natural one.

On the other hand, when talking about finite elements and degrees of freedom, components are not always the correct concept because components are not always individually addressable. In particular, this is the case for non-primitive finite elements. Similarly, one may not always want to address individual components but rather sets of components — e.g. all velocity components together, and separate from the pressure in the Stokes system, without further splitting the velocities into their individual components. In either case, the correct concept to think in is that of a block. Since each component, if individually addressable, is also a block, thinking in terms of blocks is most frequently the better strategy.

For a given finite element, the number of components can be queried using the FiniteElementData::n_components() function, and you can find out which vector components are nonzero for a given finite element shape function using FiniteElement::get_nonzero_components(). The values and gradients of individual components of a shape function (if the element is primitive) can be queried using the FiniteElement::shape_value_component() and FiniteElement::shape_grad_component() functions on the reference cell. The FEValues::shape_value_component() and FEValues::shape_grad_component() functions do the same on a real cell. See also the documentation of the FiniteElement and FEValues classes.
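
A short sketch of these queries; the element, the point p, and the indices below are made up for illustration:

// Ask a vector-valued element about its component structure.
FESystem<dim> fe(FE_Q<dim>(2), dim, FE_Q<dim>(1), 1);
const unsigned int n_comp = fe.n_components();  // dim + 1 here

// Value of component c of shape function i at a reference-cell point p
// (meaningful where the element is primitive in that shape function):
const Point<dim>   p;
const unsigned int i = 0, c = 0;
const double value = fe.shape_value_component(i, p, c);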

@@ -262,7 +262,7 @@

would result in a mask [true, true, false] in 2d. Of course, in 3d, the result would be [true, true, true, false].

Note
Just as one can think of composed elements as being made up of components or blocks, there are component masks (represented by the ComponentMask class) and block masks (represented by the BlockMask class). The FiniteElement class has functions that convert between the two kinds of objects.
Not all component masks actually make sense. For example, if you have a FE_RaviartThomas object in 2d, then it doesn't make any sense to have a component mask of the form [true, false] because you try to select individual vector components of a finite element where each shape function has both $x$ and $y$ velocities. In essence, while you can of course create such a component mask, there is nothing you can do with it.
Compressing distributed vectors and matrices

For parallel computations, deal.II uses the vector and matrix classes defined in the PETScWrappers and TrilinosWrappers namespaces. When running programs in parallel using MPI, these classes only store a certain number of rows or elements on the current processor, whereas the rest of the vector or matrix is stored on the other processors that belong to our MPI universe. This presents a certain problem when you assemble linear systems: we add elements to the matrix and right hand side vectors that may or may not be stored locally. Sometimes, we may also want to just set an element, not add to it.
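
In code, the usual pattern after assembly is a single compress() call per object; a sketch assuming PETScWrappers::MPI objects named system_matrix and system_rhs:

// Exchange the entries cached for other processors during assembly.
system_matrix.compress(VectorOperation::add);
system_rhs.compress(VectorOperation::add);
// If entries were set with = rather than accumulated with +=, use
// VectorOperation::insert instead.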

@@ -304,9 +304,9 @@

Degree of freedom

The term "degree of freedom" (often abbreviated as "DoF") is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of a finite element problem is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in V_h$). In other words, all we say here is that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf{x})$ we have used above in the expansion of $u_h(\mathbf{x})$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh. Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher order elements one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler. The process of enumerating degrees of freedom is referred to as "distributing DoFs" in deal.II.
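
A minimal sketch of "distributing DoFs" for a $Q_1$ space, assuming a triangulation already exists:

// Enumerate the basis functions of V_h; for FE_Q(1) this effectively
// numbers the vertices of the mesh.
FE_Q<dim>       fe(1);
DoFHandler<dim> dof_handler(triangulation);
dof_handler.distribute_dofs(fe);
const types::global_dof_index n_dofs = dof_handler.n_dofs();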

Direction flags
@@ -327,7 +327,7 @@
Distorted cells

A distorted cell is a cell for which the mapping from the reference cell to real cell has a Jacobian whose determinant is non-positive somewhere in the cell. Typically, we only check the sign of this determinant at the vertices of the cell. The function GeometryInfo::alternating_form_at_vertices computes these determinants at the vertices.

By way of example, if all of the determinants are of roughly equal value and on the order of $h^\text{dim}$ then the cell is well-shaped. For example, a square cell or face has determinants equal to $h^\text{dim}$ whereas a strongly sheared parallelogram has a determinant much smaller. Similarly, a cell with very unequal edge lengths will have widely varying determinants. Conversely, a pinched cell in which the location of two or more vertices is collapsed to a single point has a zero determinant at this location. Finally, an inverted or twisted cell in which the location of two vertices is out of order will have negative determinants.

The following two images show a well-formed, a pinched, and a twisted cell for both 2d and 3d:

@@ -366,19 +366,19 @@

Generalized support points

"Generalized support points" are, as the name suggests, a generalization of support points. The latter are used to describe that a finite element simply interpolates values at individual points (the "support points"). If we call these points $\hat{\mathbf{x}}_i$ (where the hat indicates that these points are defined on the reference cell $\hat{K}$), then one typically defines shape functions $\varphi_j(\mathbf{x})$ in such a way that the nodal functionals $\Psi_i[\cdot]$ simply evaluate the function at the support point, i.e., that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)$, and the basis is chosen so that $\Psi_i[\varphi_j]=\delta_{ij}$ where $\delta_{ij}$ is the Kronecker delta function. This leads to the common Lagrange elements.

(In the vector valued case, the only other piece of information besides the support points $\hat{\mathbf{x}}_i$ that one needs to provide is the vector component $c(i)$ the $i$th node functional corresponds to, so that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)_{c(i)}$.)

On the other hand, there are other kinds of elements that are not defined this way. For example, for the lowest order Raviart-Thomas element (see the FE_RaviartThomas class), the node functional evaluates not individual components of a vector-valued finite element function with dim components, but the normal component of this vector: $\Psi_i[\varphi] = \varphi(\hat{\mathbf{x}}_i) \cdot \mathbf{n}_i$, where the $\mathbf{n}_i$ are the normal vectors to the face of the cell on which $\hat{\mathbf{x}}_i$ is located. In other words, the node functional is a linear combination of the components of $\varphi$ when evaluated at $\hat{\mathbf{x}}_i$. Similar things happen for the BDM, ABF, and Nedelec elements (see the FE_BDM, FE_ABF, FE_Nedelec classes).

In these cases, the element does not have support points because it is not purely interpolatory; however, some kind of interpolation is still involved when defining shape functions as the node functionals still require point evaluations at special points $\hat{\mathbf{x}}_i$. In these cases, we call the points generalized support points.

Finally, there are elements that still do not fit into this scheme. For example, some hierarchical basis functions (see, for example the FE_Q_Hierarchical element) are defined so that the node functionals are moments of finite element functions, $\Psi_i[\varphi] = \int_{\hat{K}} \varphi(\hat{\mathbf{x}}) {\hat{x}_1}^{p_1(i)} {\hat{x}_2}^{p_2(i)}$ in 2d, and similarly for 3d, where the $p_d(i)$ are the order of the moment described by shape function $i$. Some other elements use moments over edges or faces. In all of these cases, node functionals are not defined through interpolation at all, and these elements then have neither support points, nor generalized support points.

geometry paper
@@ -453,47 +453,47 @@
Lumped mass matrix

The mass matrix is a matrix of the form

\begin{align*}
  M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
\end{align*}

It frequently appears in the solution of time dependent problems where, if one uses an explicit time stepping method, it then leads to the need to solve problems of the form

\begin{align*}
  MU^n = MU^{n-1} + k_n BU^{n-1},
\end{align*}

in time step $n$, where $U^n$ is the solution to be computed, $U^{n-1}$ is the known solution from the previous time step, and $B$ is a matrix related to the differential operator in the PDE. $k_n$ is the size of the time step. A similar linear system of equations also arises out of the discretization of second-order differential equations.

The presence of the matrix $M$ on the left side is a nuisance because, even though we have used an explicit time stepping method, we still have to solve a linear system in each time step. It would be much preferable if the matrix were diagonal. "Lumping" the mass matrix is a strategy to replace $M$ by a matrix $M_\text{diagonal}$ that actually is diagonal, yet does not destroy the accuracy of the resulting solution.

Historically, mass lumping was performed by adding the elements of a row together and setting the diagonal entries of $M_\text{diagonal}$ to that sum. This works for $Q_1$ and $P_1$ elements, for example, and can be understood mechanically by replacing the continuous medium we are discretizing by one where the continuous mass distribution is replaced by one where (finite amounts of) mass are located only at the nodes. That is, we are "lumping together" the mass of an element at its vertices, thus giving rise to the name "lumped mass matrix". A more mathematical perspective is to compute the integral above for $M_{ij}$ via special quadrature rules; in particular, we replace the computation of

\begin{align*}
  M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx
         = \sum_K \int_K \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
\end{align*}

by quadrature

\begin{align*}
  (M_{\text{diagonal}})_{ij} = \sum_K \sum_q \varphi_i(\mathbf x_q^K) \varphi_j(\mathbf x_q^K)
  |K| w_q,
\end{align*}

where we choose the quadrature points as the nodes at which the shape functions are defined. If we order the quadrature points in the same way as the shape functions, then

\begin{align*}
  \varphi_i(\mathbf x_q^K) = \delta_{iq},
\end{align*}

and consequently

\begin{align*}
  (M_{\text{diagonal}})_{ij} = \delta_{ij} \sum_{K, \text{supp}\varphi_i \cap K \neq \emptyset} |K| w_i,
\end{align*}

where the sum extends over those cells on which $\varphi_i$ is nonzero. The so-computed mass matrix is therefore diagonal.

Whether or not this particular choice of quadrature formula is sufficient to retain the convergence rate of the discretization is a separate question. For the usual $Q_k$ finite elements (implemented by FE_Q and FE_DGQ), the appropriate quadrature formulas are of QGaussLobatto type. Mass lumping can also be done with FE_SimplexP_Bubbles, for example, if appropriate quadrature rules are chosen.

For an example of where lumped mass matrices play a role, see step-69.
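
A hedged sketch of the quadrature-based lumping described above, assuming dof_handler uses FE_Q elements, lumped_mass is an already initialized SparseMatrix<double>, and dim is the space dimension:

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/numerics/matrix_tools.h>

// Choose the Gauss-Lobatto rule whose points coincide with the FE_Q
// support points; then phi_i(x_q) = delta_iq and all off-diagonal
// entries of the assembled matrix vanish.
const unsigned int degree = dof_handler.get_fe().degree;
const dealii::QGaussLobatto<dim> lumping_quadrature(degree + 1);

dealii::MatrixCreator::create_mass_matrix(dof_handler,
                                          lumping_quadrature,
                                          lumped_mass);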

Manifold indicator

Every object that makes up a Triangulation (cells, faces, edges, etc.) is associated with a unique number (of type types::manifold_id) that is used to identify which manifold object is responsible for generating new points when the mesh is refined.

By default, all manifold indicators of a mesh are set to numbers::flat_manifold_id. A typical piece of code that sets the manifold indicator on an object to something else would look like this, here setting the manifold indicator to 42 for all cells whose center has an $x$ component less than zero:

for (auto &cell : triangulation.active_cell_iterators())
  if (cell->center()[0] < 0)
    cell->set_manifold_id(42);
@@ -504,41 +504,41 @@
Mass matrix

The "mass matrix" is a matrix of the form

\begin{align*}
  M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
\end{align*}

possibly with a coefficient inside the integral, and where $\varphi_i(\mathbf x)$ are the shape functions of a finite element. The origin of the term refers to the fact that in structural mechanics (where the finite element method originated), one often starts from the elastodynamics (wave) equation

\begin{align*}
  \rho \frac{\partial^2 u}{\partial t^2}
  -\nabla \cdot C \nabla u = f.
\end{align*}

/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 2024-01-30 03:04:27.344672230 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 2024-01-30 03:04:27.344672230 +0000
@@ -340,7 +340,7 @@

step-47

Solving the fourth-order biharmonic equation using the $C^0$ Interior Penalty (C0IP) method.
Keywords: FEInterfaceValues

/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex differs (LaTeX 2e document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex 2023-10-24 00:00:00.000000000 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex 2023-10-24 00:00:00.000000000 +0000
@@ -31,15 +31,6 @@
 \pagestyle{empty}
 \begin{document}
-$O(\text{dim}^3)$
-\pagebreak
-
-$u = u - P^{-1} (A u - v)$
-\pagebreak
-
-$u = u - P^{-T} (A u - v)$
-\pagebreak
-
 $F(u,\nabla u)=0$
 \pagebreak
@@ -158,6 +149,15 @@
 $\dfrac{d f(x, y(x))}{d y}$
 \pagebreak

+$O(\text{dim}^3)$
+\pagebreak
+
+$u = u - P^{-1} (A u - v)$
+\pagebreak
+
+$u = u - P^{-T} (A u - v)$
+\pagebreak
+
 $u|_{\partial\Omega}=g$
 \pagebreak
@@ -337,116 +337,6 @@
 $J_K$
 \pagebreak
-$Q_2$
-\pagebreak
-
-$p$
-\pagebreak
-
-$(A+k\,B)\,C$
-\pagebreak
-
-$B$
-\pagebreak
-
-$b-Ax$
-\pagebreak
-
-$\|u-u_h\|_{H^1} \le Ch^p \|u\|_{H^{p+1}}$
-\pagebreak
-
-$V_h$
-\pagebreak
-
-$u_h(\mathbf x)= \sum_j U_j \varphi_i(\mathbf x)$
-\pagebreak
-
-$U_j$
-\pagebreak
-
-$(u,v)$
-\pagebreak
-
-\begin{align*}
-  \mathbf S(u,v) &= (1-v)\mathbf c_0(u)+v \mathbf c_1(u) + (1-u)\mathbf c_2(v) + u \mathbf c_3(v) \\
-  &\quad - \left[(1-u)(1-v) \mathbf x_0 + u(1-v) \mathbf x_1 + (1-u)v \mathbf x_2 + uv \mathbf x_3 \right]
-\end{align*}
-\pagebreak
-
-$\bf x_0, \bf x_1, \bf x_2, \bf x_3$
-\pagebreak
-
-$\bf c_0, \bf c_1, \bf c_2, \bf c_3$
-\pagebreak
-
-$(0,1)^2$
-\pagebreak
-
-$(u,v) = (0.5, 0.5)$
-\pagebreak
-
-$\mathbf c_0(0.5)$
-\pagebreak
-
-$\mathbf c_1(0.5)$
-\pagebreak
-
-$\mathbf c_2(0.5)$
-\pagebreak
-
-$\mathbf c_3(0.5)$
-\pagebreak
-
-$\frac{\displaystyle 1}{\displaystyle 2}$
-\pagebreak
-
-$-\frac{\displaystyle 1}{\displaystyle 4}$
-\pagebreak
-
-$-\frac{\displaystyle 1}{\displaystyle 4}$
-\pagebreak
-
-$\frac{\displaystyle 1}{\displaystyle 2}$
-\pagebreak
-
-$+\frac{\displaystyle 1}{\displaystyle 2}$
-\pagebreak
-
-$\frac{\displaystyle 1}{\displaystyle 8}$
-\pagebreak
-
-$\frac{\displaystyle 1}{\displaystyle 8}$
-\pagebreak
-
-$(u_i,v_i)$
-\pagebreak
-
-$\mathcal O(h^{k+1})$
-\pagebreak
-
-$k=10$
-\pagebreak
-
-$\mathcal O(k)$
-\pagebreak
-
-$\mathcal O(k^d)$
-\pagebreak
-
-$\mathcal O(1)$
-\pagebreak
-
-$(k+1)^{d-1}$
-\pagebreak
-
-$(k+1)^d$
-\pagebreak
-
 \begin{eqnarray*}
  \left(\begin{array}{cc}
   M & B^T \\
   B & 0
@@ -467,6 +357,9 @@
 $M$
 \pagebreak

+$B$
+\pagebreak
+
 $B^T$
 \pagebreak
@@ -495,6 +388,9 @@
 $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$
 \pagebreak

+$U_j$
+\pagebreak
+
 $u_h \in V_h$
 \pagebreak
@@ -505,6 +401,9 @@
 V_h$
 \pagebreak

+$V_h$
+\pagebreak
+
 $\varphi_j(\mathbf{x})$
 \pagebreak
@@ -831,6 +730,107 @@
 $\mathbf F$
 \pagebreak

+$Q_2$
+\pagebreak
+
+$p$
+\pagebreak
+
+$(A+k\,B)\,C$
+\pagebreak
+
+$b-Ax$
+\pagebreak
+
+$u_h(\mathbf x)= \sum_j U_j \varphi_i(\mathbf x)$
+\pagebreak
+
/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex differs (LaTeX 2e document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 2023-10-24 00:00:00.000000000 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 2023-10-24 00:00:00.000000000 +0000
@@ -33,15 +33,6 @@
 \pagestyle{empty}
 \begin{document}
-$O(\text{dim}^3)$
-\pagebreak
-
-$u = u - P^{-1} (A u - v)$
-\pagebreak
-
-$u = u - P^{-T} (A u - v)$
-\pagebreak
-
 $F(u,\nabla u)=0$
 \pagebreak
@@ -160,6 +151,15 @@
 $\dfrac{d f(x, y(x))}{d y}$
 \pagebreak

+$O(\text{dim}^3)$
+\pagebreak
+
+$u = u - P^{-1} (A u - v)$
+\pagebreak
+
+$u = u - P^{-T} (A u - v)$
+\pagebreak
+
 $u|_{\partial\Omega}=g$
 \pagebreak
@@ -339,116 +339,6 @@
 $J_K$
 \pagebreak
-$Q_2$
-\pagebreak
-
-$p$
-\pagebreak
-
-$(A+k\,B)\,C$
-\pagebreak
-
-$B$
-\pagebreak
-
-$b-Ax$
-\pagebreak
-
-$\|u-u_h\|_{H^1} \le Ch^p \|u\|_{H^{p+1}}$
-\pagebreak
-
-$V_h$
-\pagebreak
-
-$u_h(\mathbf x)= \sum_j U_j \varphi_i(\mathbf x)$
-\pagebreak
-
-$U_j$
-\pagebreak
-
-$(u,v)$
-\pagebreak
-
-\begin{align*}
-  \mathbf S(u,v) &= (1-v)\mathbf c_0(u)+v \mathbf c_1(u) + (1-u)\mathbf c_2(v) + u \mathbf c_3(v) \\
-  &\quad - \left[(1-u)(1-v) \mathbf x_0 + u(1-v) \mathbf x_1 + (1-u)v \mathbf x_2 + uv \mathbf x_3 \right]
-\end{align*}
-\pagebreak
-
-$\bf x_0, \bf x_1, \bf x_2, \bf x_3$
-\pagebreak
-
-$\bf c_0, \bf c_1, \bf c_2, \bf c_3$
-\pagebreak
-
-$(0,1)^2$
-\pagebreak
-
-$(u,v) = (0.5, 0.5)$
-\pagebreak
-
-$\mathbf c_0(0.5)$
-\pagebreak
-
-$\mathbf c_1(0.5)$
-\pagebreak
-
-$\mathbf c_2(0.5)$
-\pagebreak
-
-$\mathbf c_3(0.5)$
-\pagebreak
-
-$\frac{\displaystyle 1}{\displaystyle 2}$
-\pagebreak
-
-$-\frac{\displaystyle 1}{\displaystyle 4}$
-\pagebreak
-
-$-\frac{\displaystyle 1}{\displaystyle 4}$
-\pagebreak
-
-$\frac{\displaystyle 1}{\displaystyle 2}$
-\pagebreak
-
-$+\frac{\displaystyle 1}{\displaystyle 2}$
-\pagebreak
-
-$\frac{\displaystyle 1}{\displaystyle 8}$
-\pagebreak
-
-$\frac{\displaystyle 1}{\displaystyle 8}$
-\pagebreak
-
-$(u_i,v_i)$
-\pagebreak
-
-$\mathcal O(h^{k+1})$
-\pagebreak
-
-$k=10$
-\pagebreak
-
-$\mathcal O(k)$
-\pagebreak
-
-$\mathcal O(k^d)$
-\pagebreak
-
-$\mathcal O(1)$
-\pagebreak
-
-$(k+1)^{d-1}$
-\pagebreak
-
-$(k+1)^d$
-\pagebreak
-
 \begin{eqnarray*}
  \left(\begin{array}{cc}
   M & B^T \\
   B & 0
@@ -469,6 +359,9 @@
 $M$
 \pagebreak

+$B$
+\pagebreak
+
 $B^T$
 \pagebreak
@@ -497,6 +390,9 @@
 $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$
 \pagebreak

+$U_j$
+\pagebreak
+
 $u_h \in V_h$
 \pagebreak
@@ -507,6 +403,9 @@
 V_h$
 \pagebreak

+$V_h$
+\pagebreak
+
 $\varphi_j(\mathbf{x})$
 \pagebreak
@@ -833,6 +732,107 @@
 $\mathbf F$
 \pagebreak

+$Q_2$
+\pagebreak
+
+$p$
+\pagebreak
+
+$(A+k\,B)\,C$
+\pagebreak
+
+$b-Ax$
+\pagebreak
+
+$u_h(\mathbf x)= \sum_j U_j \varphi_i(\mathbf x)$
+\pagebreak
+
/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 2024-01-30 03:04:27.860676529 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 2024-01-30 03:04:27.864676562 +0000
@@ -695,7 +695,7 @@
  • Improved: The FEValuesViews objects that one gets when writing things like fe_values[velocities] (see Handling vector valued problems) have become a lot smarter. They now compute a significant amount of data at creation time, rather than on the fly. This means that creating such objects becomes more expensive but using them is cheaper. To offset this cost, FEValuesBase objects now create all possible FEValuesViews objects at creation time, rather than whenever you do things like fe_values[velocities], and simply return a reference to a pre-generated object. This turns an $O(N)$ effort into an $O(1)$ effort, where $N$ is the number of cells.
    (WB 2008/12/10)

/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 2024-01-30 03:04:27.892676795 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 2024-01-30 03:04:27.892676795 +0000
@@ -366,7 +366,7 @@
  • New: The GeometryInfo::d_linear_shape_function and GeometryInfo::d_linear_shape_function_gradient functions can be used to represent the $d$-linear shape functions that are frequently used to map the reference cell to real cells (though the Mapping class hierarchy also allows to use higher order mappings).
    (WB 2009/06/28)

    @@ -501,7 +501,7 @@
  • New: There are new functions FullMatrix::cholesky and FullMatrix::outer_product. FullMatrix::cholesky finds the Cholesky decomposition of a matrix in lower triangular form. FullMatrix::outer_product calculates *this $= VW^T$ where $V$ and $W$ are vectors.
    (Jean Marie Linhart 2009/07/27)

/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 2024-01-30 03:04:27.924677062 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 2024-01-30 03:04:27.924677062 +0000
@@ -839,7 +839,7 @@

  • New: There is now a new class Functions::InterpolatedTensorProductGridData that can be used to (bi-/tri-)linearly interpolate data given on a tensor product mesh of $x$ (and $y$ and $z$) values, for example to evaluate experimentally determined coefficients, or to assess the accuracy of a solution by comparing with a solution generated by a different code and written in gridded data. There is also a new class Functions::InterpolatedUniformGridData that can perform the same task more efficiently if the data is stored on meshes that are uniform in each coordinate direction.
    (Wolfgang Bangerth, 2013/12/20)

/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_5_0_and_9_0_0.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_5_0_and_9_0_0.html 2024-01-30 03:04:27.972677462 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_5_0_and_9_0_0.html 2024-01-30 03:04:27.976677496 +0000
@@ -101,7 +101,7 @@

  • New: The implementation of the divergence in FEValuesExtractors::Tensor was changed so that the gradient operator is contracted from the right. This is done in order to make it consistent with gradient of the second order tensor, namely $Grad(T) : I = Div(T)$.
    (Denis Davydov, 2018/04/12)

/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 2024-01-30 03:04:28.016677829 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 2024-01-30 03:04:28.016677829 +0000
@@ -608,7 +608,7 @@

  • Improved: GridGenerator::hyper_shell() in 3d now supports more n_cells options. While previously only 6, 12, or 96 cells were possible, the function now supports any number of the kind $6 \times 2^m$ with $m$ a non-negative integer. The new cases $m=2,3$ and $m\geq 5$ correspond to refinement in the azimuthal direction of the 6 or 12 cell case with a single mesh layer in radial direction, and are intended for shells that are thin and should be given more resolution in azimuthal direction.
    (Martin Kronbichler, 2020/04/07)

@@ -878,7 +878,7 @@

  • Improved: GridTools::delete_duplicated_vertices() now runs, for cubelike geometries, in $O(n^{3/2})$ time in 2D and $O(n^{5/3})$ time in 3D instead of $O(n^2)$ time.
    (David Wells, 2020/01/12)

@@ -1013,7 +1013,7 @@

  • Improved: The setup of MappingQGeneric::InternalData within the constructor of FEValues would previously unconditionally allocate memory for all shape functions and all quadrature points, also for the case where we use the tensor product and the full interpolation is unnecessary. This has been fixed, improving the situation for very high orders and numbers of quadrature points (e.g., avoiding 400 MB of memory for mapping degrees of 15 with $16^3$ quadrature points in 3D).
    (Martin Kronbichler, 2019/11/26)

@@ -1562,7 +1562,7 @@

  • Improved: The additional roots of the HermiteLikeInterpolation with degree $p$ greater than four have been switched to the roots of the Jacobi polynomial $P^{(4,4)}_{p-3}$, making the interior bubble functions $L_2$ orthogonal and improving the conditioning of interpolation slightly.
    (Martin Kronbichler, 2019/07/12)

/usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 2024-01-30 03:04:28.084678395 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 2024-01-30 03:04:28.084678395 +0000
@@ -358,9 +358,9 @@

    The algorithms used in the implementation of this class are described in some detail in the hp-paper. There is also a significant amount of documentation on how to use this class in the Constraints on degrees of freedom module.

    Description of constraints

    Each "line" in objects of this class corresponds to one constrained degree of freedom, with the number of the line being i, entered by using add_line() or add_lines(). The entries in this line are pairs of the form (j,aij), which are added by add_entry() or add_entries(). The organization is essentially a SparsityPattern, but with only a few lines containing nonzero elements, and therefore no data wasted on the others. For each line, which has been added by the mechanism above, an elimination of the constrained degree of freedom of the form

\[
  x_i = \sum_j a_{ij} x_j + b_i
\]

is performed, where $b_i$ is optional and set by set_inhomogeneity(). Thus, if a constraint is formulated for instance as a zero mean value of several degrees of freedom, one of the degrees has to be chosen to be eliminated.

Note that the constraints are linear in the $x_i$, and that there might be a constant (non-homogeneous) term in the constraint. This is exactly the form we need for hanging node constraints, where we need to constrain one degree of freedom in terms of others. There are other conditions of this form possible, for example for implementing mean value conditions as is done in the step-11 tutorial program. The name of the class stems from the fact that these constraints can be represented in matrix form as $X x = b$, and this object then describes the matrix $X$ and the vector $b$. The most frequent way to create/fill objects of this type is using the DoFTools::make_hanging_node_constraints() function. The use of these objects is first explained in step-6.
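
For illustration, a minimal sketch of entering the constraint $x_3 = \frac 12 x_1 + \frac 12 x_2 + 42$ by hand (indices and values are made up):

#include <deal.II/lac/affine_constraints.h>

dealii::AffineConstraints<double> constraints;
constraints.add_line(3);                // declare x_3 as constrained
constraints.add_entry(3, 1, 0.5);       // + 0.5 * x_1
constraints.add_entry(3, 2, 0.5);       // + 0.5 * x_2
constraints.set_inhomogeneity(3, 42.);  // + b_3
constraints.close();                    // sort entries, resolve chains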

    @@ -914,13 +914,13 @@
Add an entry to a given line. In other words, this function adds a term $a_{ij} x_j$ to the constraints for the $i$th degree of freedom.

If an entry with the same indices as the one this function call denotes already exists, then this function simply returns provided that the value of the entry is the same. Thus, it does no harm to enter a constraint twice.

Parameters
[in] constrained_dof_index  The index $i$ of the degree of freedom that is being constrained.
[in] column  The index $j$ of the degree of freedom being entered into the constraint for degree of freedom $i$.
[in] weight  The factor $a_{ij}$ that multiplies $x_j$.
    @@ -981,11 +981,11 @@
Set an inhomogeneity to the constraint for a degree of freedom. In other words, it adds a constant $b_i$ to the constraint for degree of freedom $i$. For this to work, you need to call add_line() first for the given degree of freedom.

Parameters
[in] constrained_dof_index  The index $i$ of the degree of freedom that is being constrained.
[in] value  The right hand side value $b_i$ for the constraint on the degree of freedom $i$.
    @@ -1013,9 +1013,9 @@

Close the filling of entries. Since the lines of a matrix of this type are usually filled in an arbitrary order and since we do not want to use associative containers to store the lines, we need to sort the lines and within the lines the columns before usage of the matrix. This is done through this function.

    Also, zero entries are discarded, since they are not needed.

    After closing, no more entries are accepted. If the object was already closed, then this function returns immediately.

This function also resolves chains of constraints. For example, degree of freedom 13 may be constrained to $u_{13} = \frac{u_3}{2} + \frac{u_7}{2}$ while degree of freedom 7 is itself constrained as $u_{7} = \frac{u_2}{2} + \frac{u_4}{2}$. Then, the resolution will be that $u_{13} = \frac{u_3}{2} + \frac{u_2}{4} + \frac{u_4}{4}$. Note, however, that cycles in this graph of constraints are not allowed, i.e., for example $u_4$ may not itself be constrained, directly or indirectly, to $u_{13}$ again.

    @@ -1445,9 +1445,9 @@

    Print the constraints represented by the current object to the given stream.

    For each constraint of the form

\[
  x_{42} = 0.5 x_2 + 0.25 x_{14} + 2.75
\]

    this function will write a sequence of lines that look like this:

    42 2 : 0.5
    42 14 : 0.25
    @@ -2025,7 +2025,7 @@

    This function takes a matrix of local contributions (local_matrix) corresponding to the degrees of freedom indices given in local_dof_indices and distributes them to the global matrix. In other words, this function implements a scatter operation. In most cases, these local contributions will be the result of an integration over a cell or face of a cell. However, as long as local_matrix and local_dof_indices have the same number of elements, this function is happy with whatever it is given.

    In contrast to the similar function in the DoFAccessor class, this function also takes care of constraints, i.e. if one of the elements of local_dof_indices belongs to a constrained node, then rather than writing the corresponding element of local_matrix into global_matrix, the element is distributed to the entries in the global matrix to which this particular degree of freedom is constrained.

With this scheme, we never write into rows or columns of constrained degrees of freedom. In order to make sure that the resulting matrix can still be inverted, we need to do something with the diagonal elements corresponding to constrained nodes. Thus, if a degree of freedom in local_dof_indices is constrained, we distribute the corresponding entries in the matrix, but also add the absolute value of the diagonal entry of the local matrix to the corresponding entry in the global matrix. Assuming the discretized operator is positive definite, this guarantees that the diagonal entry is always non-zero, positive, and of the same order of magnitude as the other entries of the matrix. On the other hand, when solving a source problem $Au=f$ the exact value of the diagonal element is not important, since the value of the respective degree of freedom will be overwritten by the distribute() call later on anyway.

    Note
    The procedure described above adds an unforeseeable number of artificial eigenvalues to the spectrum of the matrix. Therefore, it is recommended to use the equivalent function with two local index vectors in such a case.

    By using this function to distribute local contributions to the global object, one saves the call to the condense function after the vectors and matrices are fully assembled.

    Note
    This function in itself is thread-safe, i.e., it works properly also when several threads call it simultaneously. However, the function call is only thread-safe if the underlying global matrix allows for simultaneous access and the access is not to rows with the same global index at the same time. This needs to be made sure from the caller's site. There is no locking mechanism inside this method to prevent data races.
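
A hedged sketch of the assembly loop this function is designed for; dof_handler, constraints, system_matrix, system_rhs and dofs_per_cell are assumed to exist and be set up:

dealii::FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
dealii::Vector<double>     cell_rhs(dofs_per_cell);
std::vector<dealii::types::global_dof_index> local_dof_indices(dofs_per_cell);

for (const auto &cell : dof_handler.active_cell_iterators())
  {
    cell_matrix = 0.;
    cell_rhs    = 0.;
    // ... integrate the local contributions on this cell ...
    cell->get_dof_indices(local_dof_indices);

    // Scatter into the global objects, resolving constraints on the fly:
    constraints.distribute_local_to_global(
      cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
  }
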
    @@ -2067,7 +2067,7 @@

    This function does almost the same as the function above but can treat general rectangular matrices. The main difference to achieve this is that the diagonal entries in constrained rows are left untouched instead of being filled with arbitrary values.

Since the diagonal entries corresponding to eliminated degrees of freedom are not set, the result may have a zero eigenvalue, if applied to a square matrix. This has to be considered when solving the resulting problems. For solving a source problem $Au=f$, it is possible to set the diagonal entry after building the matrix by a piece of code of the form

for (unsigned int i = 0; i < matrix.m(); ++i)
  if (constraints.is_constrained(i))
    matrix.diag_element(i) = 1.;
    @@ -2356,7 +2356,7 @@
Given a vector, set all constrained degrees of freedom to values so that the constraints are satisfied. For example, if the current object stores the constraint $x_3=\frac 12 x_1 + \frac 12 x_2$, then this function will read the values of $x_1$ and $x_2$ from the given vector and set the element $x_3$ according to this constraint. Similarly, if the current object stores the constraint $x_{42}=208$, then this function will set the 42nd element of the given vector to 208.

    Note
    If this function is called with a parallel vector vec, then the vector must not contain ghost elements.
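
In practice this is typically the last step after a linear solve; solver and preconditioner below are illustrative placeholders:

solver.solve(system_matrix, solution, system_rhs, preconditioner);
constraints.distribute(solution); // overwrite constrained entries x_i
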
/usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 2024-01-30 03:04:28.124678729 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 2024-01-30 03:04:28.124678729 +0000
@@ -219,9 +219,9 @@

    For fixed theta, the Crank-Nicolson scheme is the only second order scheme. Nevertheless, further stability may be achieved by choosing theta larger than ½, thereby introducing a first order error term. In order to avoid a loss of convergence order, the adaptive theta scheme can be used, where theta=½+c dt.

    Assume that we want to solve the equation u' + F(u) = 0 with a step size k. A step of the theta scheme can be written as

\[
  M u_{n+1} + \theta k F(u_{n+1}) = M u_n - (1-\theta)k F(u_n).
\]

Here, M is the mass matrix. We see that the right hand side amounts to an explicit Euler step with modified step size in weak form (up to inversion of M). The left hand side corresponds to an implicit Euler step with modified step size (right hand side given). Thus, the implementation of the theta scheme will use two Operator objects, one for the explicit, one for the implicit part. Each of these will use its own TimestepData to account for the modified step sizes (and different times if the problem is not autonomous). Note that once the explicit part has been computed, the left hand side actually constitutes a linear or nonlinear system which has to be solved.

    Usage AnyData

    @@ -301,8 +301,8 @@
    }
Now we need to study the application of the implicit and explicit operator. We assume that the pointer matrix points to the matrix created in the main program (the constructor did this for us). Here, we first get the time step size from the AnyData object that was provided as input. Then, if we are in the first step or if the timestep has changed, we fill the local matrix $m$, such that with the given matrix $M$, it becomes

\[ m = I - \Delta t M. \]

    After we have worked off the notifications, we clear them, such that the matrix is only generated when necessary.

    void Explicit::operator()(AnyData &out, const AnyData &in)
    @@ -1142,7 +1142,7 @@

    The operator computing the explicit part of the scheme. This will receive in its input data the value at the current time with name "Current time solution". It should obtain the current time and time step size from explicit_data().

Its return value is $Mu+cF(u)$, where $u$ is the current state vector, $M$ the mass matrix, $F$ the operator in space and $c$ is the adjusted time step size $(1-\theta) \Delta t$.

    Definition at line 416 of file theta_timestepping.h.

    @@ -1170,7 +1170,7 @@

    The operator solving the implicit part of the scheme. It will receive in its input data the vector "Previous time". Information on the timestep should be obtained from implicit_data().

Its return value is the solution $u$ of $Mu-cF(u)=f$, where $f$ is the dual space vector found in the "Previous time" entry of the input data, $M$ the mass matrix, $F$ the operator in space and $c$ is the adjusted time step size $\theta \Delta t$.

    Definition at line 428 of file theta_timestepping.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 2024-01-30 03:04:28.152678962 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 2024-01-30 03:04:28.156678995 +0000
@@ -154,10 +154,10 @@

    Detailed Description

    template<int dim>
    class AnisotropicPolynomials< dim >

    Anisotropic tensor product of given polynomials.

Given one-dimensional polynomials $P^x_1(x), P^x_2(x), \ldots$ in $x$-direction, $P^y_1(y), P^y_2(y), \ldots$ in $y$-direction, and so on, this class generates polynomials of the form $Q_{ijk}(x,y,z) = P^x_i(x)P^y_j(y)P^z_k(z)$. (With obvious generalization if dim is in fact only 2. If dim is in fact only 1, then the result is simply the same set of one-dimensional polynomials passed to the constructor.)

If the elements of each set of base polynomials are mutually orthogonal on the interval $[-1,1]$ or $[0,1]$, then the tensor product polynomials are orthogonal on $[-1,1]^d$ or $[0,1]^d$, respectively.

The resulting dim-dimensional tensor product polynomials are ordered as follows: We iterate over the $x$ coordinates running fastest, then the $y$ coordinate, etc. For example, for dim==2, the first few polynomials are thus $P^x_1(x)P^y_1(y)$, $P^x_2(x)P^y_1(y)$, $P^x_3(x)P^y_1(y)$, ..., $P^x_1(x)P^y_2(y)$, $P^x_2(x)P^y_2(y)$, $P^x_3(x)P^y_2(y)$, etc.

    Definition at line 322 of file tensor_product_polynomials.h.
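
A small sketch of constructing such a space, here linear in $x$ and quadratic in $y$ (a plausible construction, not taken from the documentation above):

#include <deal.II/base/polynomial.h>
#include <deal.II/base/tensor_product_polynomials.h>

std::vector<std::vector<dealii::Polynomials::Polynomial<double>>> base(2);
base[0] = dealii::Polynomials::LagrangeEquidistant::generate_complete_basis(1);
base[1] = dealii::Polynomials::LagrangeEquidistant::generate_complete_basis(2);

// 2 x 3 = 6 polynomials Q_ij(x,y) = P^x_i(x) P^y_j(y)
dealii::AnisotropicPolynomials<2> aniso(base);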

    Constructor & Destructor Documentation

    @@ -590,7 +590,7 @@
Each tensor product polynomial $p_i$ is a product of one-dimensional polynomials in each space direction. Compute the indices of these one-dimensional polynomials for each space direction, given the index i.

    Definition at line 538 of file tensor_product_polynomials.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 2024-01-30 03:04:28.184679228 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 2024-01-30 03:04:28.184679228 +0000
@@ -230,14 +230,14 @@

    Detailed Description

    Interface for using ARPACK. ARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. Here we interface to the routines dnaupd and dneupd of ARPACK. If the operator is specified to be symmetric we use the symmetric interface dsaupd and dseupd of ARPACK instead. The package is designed to compute a few eigenvalues and corresponding eigenvectors of a general n by n matrix A. It is most appropriate for large sparse matrices A.

In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    The ArpackSolver can be used in application codes with serial objects in the following way:

    solver.solve(A, B, OP, lambda, x, size_of_spectrum);
for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells ARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector that will contain the eigenvectors computed, and OP is an inverse operation for the matrix A. Shift and invert transformation around zero is applied.

    Through the AdditionalData the user can specify some of the parameters to be set.

    For further information on how the ARPACK routines dsaupd, dseupd, dnaupd and dneupd work and also how to set the parameters appropriately please take a look into the ARPACK manual.

    Note
    Whenever you eliminate degrees of freedom using AffineConstraints, you generate spurious eigenvalues and eigenvectors. If you make sure that the diagonals of eliminated matrix rows are all equal to one, you get a single additional eigenvalue. But beware that some functions in deal.II set these diagonals to rather arbitrary (from the point of view of eigenvalue problems) values. See also step-36 for an example.
    @@ -510,7 +510,7 @@
Solve the generalized eigenspectrum problem $A x=\lambda B x$ by calling the dsaupd and dseupd or dnaupd and dneupd functions of ARPACK.

    The function returns a vector of eigenvalues of length n and a vector of eigenvectors of length n in the symmetric case and of length n+1 in the non-symmetric case. In the symmetric case all eigenvectors are real. In the non-symmetric case complex eigenvalues always occur as complex conjugate pairs. Therefore the eigenvector for an eigenvalue with nonzero complex part is stored by putting the real and the imaginary parts in consecutive real-valued vectors. The eigenvector of the complex conjugate eigenvalue does not need to be stored, since it is just the complex conjugate of the stored eigenvector. Thus, if the last n-th eigenvalue has a nonzero imaginary part, Arpack needs in total n+1 real-valued vectors to store real and imaginary parts of the eigenvectors.

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 2024-01-30 03:04:28.236679662 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 2024-01-30 03:04:28.236679662 +0000
@@ -1025,7 +1025,7 @@
Return a reference to the $i$th element of the range represented by the current object.

    This function is marked as const because it does not change the view object. It may however return a reference to a non-const memory location depending on whether the template type of the class is const or not.

    This function is only allowed to be called if the underlying data is indeed stored in CPU memory.
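
For illustration, a brief sketch of this kind of element access through a view created with make_array_view (the container is made up):

#include <deal.II/base/array_view.h>
#include <vector>

std::vector<double> storage(10, 0.);
dealii::ArrayView<double> view = dealii::make_array_view(storage);
view[3] = 1.0; // operator[] returns a reference into 'storage'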

/usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 2024-01-30 03:04:28.280680028 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 2024-01-30 03:04:28.280680028 +0000
@@ -353,27 +353,27 @@

    Names of difference formulas.

    Enumerator
    Euler 

    The symmetric Euler formula of second order:

\[
  u'(t) \approx
  \frac{u(t+h) - u(t-h)}{2h}.
\]

    UpwindEuler 

    The upwind Euler formula of first order:

\[
  u'(t) \approx
  \frac{u(t) - u(t-h)}{h}.
\]

    FourthOrder 

    The fourth order scheme

\[
  u'(t) \approx
  \frac{u(t-2h) - 8u(t-h) + 8u(t+h) - u(t+2h)}{12h}.
\]
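
A hedged sketch of selecting one of these formulas; MyFunction is a hypothetical class derived from AutoDerivativeFunction<2> that only implements value():

MyFunction f; // hypothetical user-defined function
f.set_formula(dealii::AutoDerivativeFunction<2>::FourthOrder);
f.set_h(1e-6); // the step size h appearing in the formulas above

// gradient() is now computed with the selected difference formula:
const dealii::Tensor<1, 2> g = f.gradient(dealii::Point<2>(0.5, 0.5));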

/usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 2024-01-30 03:04:28.308680262 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 2024-01-30 03:04:28.308680262 +0000
@@ -149,36 +149,36 @@ class BarycentricPolynomial< dim, Number >

    Polynomial implemented in barycentric coordinates.

    Barycentric coordinates are a coordinate system defined on simplices that are particularly easy to work with since they express coordinates in the simplex as convex combinations of the vertices. For example, any point in a triangle can be written as

\[
  (x, y) = c_0 (x_0, y_0) + c_1 (x_1, y_1) + c_2 (x_2, y_2).
\]

where each value $c_i$ is the relative weight of each vertex (so the centroid is, in 2d, where each $c_i = 1/3$). Since we only consider convex combinations we can rewrite this equation as

\[
  (x, y) = (1 - c_1 - c_2) (x_0, y_0) + c_1 (x_1, y_1) + c_2 (x_2, y_2).
\]

This results in three polynomials that are equivalent to $P^1$ in 2d. More exactly, this class implements a polynomial space defined with the basis, in 2d, of

\begin{align*}
  t_0(x, y) &= 1 - x - y \\
  t_1(x, y) &= x \\
  t_2(x, y) &= y
\end{align*}

    and, in 3d,

\begin{align*}
  t_0(x, y, z) &= 1 - x - y - z \\
  t_1(x, y, z) &= x             \\
  t_2(x, y, z) &= y             \\
  t_3(x, y, z) &= z
\end{align*}

    which is, in practice, a very convenient basis for defining simplex polynomials: for example, the fourth basis function of a TRI6 element is

\[
  4 * t_1(x, y) * t_2(x, y).
\]

Barycentric polynomials in dim-dimensional space have dim + 1 variables, since t_0 can be written in terms of the other monomials.

    Monomials can be conveniently constructed with BarycentricPolynomial::monomial().
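
    A minimal C++ sketch of the 2d basis above and of the TRI6 bubble function (illustrative only, not the BarycentricPolynomial interface; all names are made up):

    #include <cstdio>

    // The 2d barycentric basis from above: t0 = 1 - x - y, t1 = x, t2 = y.
    double t0(double x, double y) { return 1.0 - x - y; }
    double t1(double x, double y) { return x; }
    double t2(double x, double y) { return y; }

    // Fourth basis function of a TRI6 element: the edge bubble 4 * t1 * t2.
    double tri6_phi4(double x, double y) { return 4.0 * t1(x, y) * t2(x, y); }

    int main()
    {
      // At the midpoint of the edge between vertices 1 and 2 the bubble equals 1.
      std::printf("phi4(0.5, 0.5) = %g\n", tri6_phi4(0.5, 0.5));
      // The barycentric coordinates sum to one everywhere in the triangle.
      const double x = 0.2, y = 0.3;
      std::printf("t0+t1+t2 = %g\n", t0(x, y) + t1(x, y) + t2(x, y));
    }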

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html 2024-01-30 03:04:28.332680461 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html 2024-01-30 03:04:28.332680461 +0000 @@ -156,8 +156,8 @@

    Detailed Description

    template<typename VectorType>
    class BaseQR< VectorType >

    A base class for thin QR implementations.

    This class and classes derived from it are meant to build $Q$ and $R$ matrices one row/column at a time, i.e., by growing the $R$ matrix from an empty $0\times 0$ matrix to $N\times N$, where $N$ is the number of added column vectors.

    As a consequence, the matrix which has the same number of rows as each vector (i.e., the $Q$ matrix) is stored as a collection of vectors of VectorType.

    Definition at line 44 of file qr.h.
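
    The grow-by-one-column idea can be sketched with classical Gram-Schmidt on plain std::vector<double> columns (a simplified, standalone illustration of the concept, not the library's implementation):

    #include <cmath>
    #include <vector>

    // Grow a thin QR factorization one column at a time (classical Gram-Schmidt):
    // Q is kept as a collection of orthonormal vectors, R as a growing triangle.
    struct ThinQR
    {
      std::vector<std::vector<double>> Q; // orthonormal columns, each of length n
      std::vector<std::vector<double>> R; // R[j][i] = entry (i,j) of the upper triangle

      void append_column(std::vector<double> v)
      {
        std::vector<double> r(Q.size() + 1, 0.0);
        for (std::size_t i = 0; i < Q.size(); ++i) // project out existing columns
          {
            for (std::size_t k = 0; k < v.size(); ++k)
              r[i] += Q[i][k] * v[k];
            for (std::size_t k = 0; k < v.size(); ++k)
              v[k] -= r[i] * Q[i][k];
          }
        double norm = 0; // normalize the remainder
        for (double x : v)
          norm += x * x;
        r[Q.size()] = std::sqrt(norm); // (no guard against rank deficiency here)
        for (double &x : v)
          x /= r[Q.size()];
        Q.push_back(v);
        R.push_back(r);
      }
    };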

    Member Typedef Documentation

    @@ -368,7 +368,7 @@ const bool transpose = false

    Solve $Rx=y$. Vectors x and y should be consistent with the current size of the subspace. If transpose is true, $R^Tx=y$ is solved instead.

    @@ -400,7 +400,7 @@
    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implemented in QR< VectorType >, and ImplicitQR< VectorType >.

    @@ -434,7 +434,7 @@
    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implemented in QR< VectorType >, and ImplicitQR< VectorType >.

    @@ -468,7 +468,7 @@
    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implemented in QR< VectorType >, and ImplicitQR< VectorType >.

    @@ -502,7 +502,7 @@
    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implemented in QR< VectorType >, and ImplicitQR< VectorType >.

    @@ -557,7 +557,7 @@
    Compute $y=Hx$ where $H$ is the matrix formed by the column vectors stored by this object.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 2024-01-30 03:04:28.364680728 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 2024-01-30 03:04:28.364680728 +0000 @@ -210,7 +210,7 @@ void swap (BlockIndices &u, BlockIndices &v)

    Detailed Description

    BlockIndices represents a range of indices (such as the range $[0,N)$ of valid indices for elements of a vector) and how this one range is broken down into smaller but contiguous "blocks" (such as the velocity and pressure parts of a solution vector). In particular, it provides the ability to translate between global indices and the indices within a block. This class is used, for example, in the BlockVector, BlockSparsityPattern, and BlockMatrixBase classes.

    The information that can be obtained from this class falls into two groups. First, it is possible to query the global size of the index space (through the total_size() member function), and the number of blocks and their sizes (via size() and the block_size() functions).

    Secondly, this class manages the conversion of global indices to the local indices within this block, and the other way around. This is required, for example, when you address a global element in a block vector and want to know within which block this is, and which index within this block it corresponds to. It is also useful if a matrix is composed of several blocks, where you have to translate global row and column indices to local ones.

    See also
    Block (linear algebra)
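
    The global-to-local translation that this class provides can be sketched as a standalone function (illustrative only; the function name and signature are made up and do not match the class's interface):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Translate a global index into (block number, index within block), given the
    // sizes of the contiguous blocks, e.g. {velocity_dofs, pressure_dofs}.
    std::pair<std::size_t, std::size_t>
    global_to_local(std::size_t global_index, const std::vector<std::size_t> &block_sizes)
    {
      for (std::size_t b = 0; b < block_sizes.size(); ++b)
        {
          if (global_index < block_sizes[b])
            return {b, global_index};
          global_index -= block_sizes[b];
        }
      return {block_sizes.size(), 0}; // out of range
    }
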
    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html 2024-01-30 03:04:28.416681162 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html 2024-01-30 03:04:28.416681162 +0000 @@ -276,7 +276,7 @@
    LinearOperator< Range, Domain, BlockPayload::BlockType > linear_operator(const Matrix &matrix)
    BlockLinearOperator< Range, Domain, BlockPayload > block_diagonal_operator(const BlockMatrixType &block_matrix)

    A BlockLinearOperator can be sliced to a LinearOperator at any time. This removes all information about the underlying block structure (because the std::function objects described above are no longer available); the linear operator interface, however, remains intact.

    Note
    This class makes heavy use of std::function objects and lambda functions. This flexibility comes with a run-time penalty. Only use this object to encapsulate objects with medium to large individual block sizes, and small block structure (as a rule of thumb, matrix blocks greater than $1000\times1000$).

    Definition at line 166 of file block_linear_operator.h.

    Member Typedef Documentation

    @@ -787,9 +787,9 @@
    LinearOperator< Range, Domain, BlockPayload::BlockType > distribute_constraints_linear_operator(const AffineConstraints< typename Range::value_type > &constraints, const LinearOperator< Range, Domain, BlockPayload::BlockType > &exemplar)

    and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

    \[
      (C^T A C + Id_c) x = C^T (b - A\,k)
    \]

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom module.

    @@ -830,9 +830,9 @@

    with

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

    \[
      (C^T A C + Id_c) x = C^T (b - A\,k)
    \]

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom module.

    @@ -864,8 +864,8 @@
    Addition of two linear operators first_op and second_op given by $(\mathrm{first\_op}+\mathrm{second\_op})x \dealcoloneq \mathrm{first\_op}(x) + \mathrm{second\_op}(x)$

    Definition at line 390 of file linear_operator.h.

    @@ -893,8 +893,8 @@
    Subtraction of two linear operators first_op and second_op given by $(\mathrm{first\_op}-\mathrm{second\_op})x \dealcoloneq \mathrm{first\_op}(x) - \mathrm{second\_op}(x)$

    Definition at line 449 of file linear_operator.h.

    @@ -1530,7 +1530,7 @@

    Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that will likely be needed in order to perform any useful tasks in linear algebra with this operator.

    We construct the definition of the Schur complement in the following way:

    Consider a general system of linear equations that can be decomposed into two major sets of equations:

    \begin{eqnarray*}
     \mathbf{K}\mathbf{d} = \mathbf{f}
     \quad \Rightarrow\quad
     \left(\begin{array}{cc}
        A & B \\ C & D
     \end{array}\right)
     \left(\begin{array}{cc}
        x \\ y
     \end{array}\right)
     =
     \left(\begin{array}{cc}
        f \\ g
     \end{array}\right),
    \end{eqnarray*}

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $.

    This is equivalent to the following two statements:

    \begin{eqnarray*}
       (1) \quad Ax + By &=& f \\
       (2) \quad Cx + Dy &=& g \quad .
    \end{eqnarray*}

    Assuming that $ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

    \begin{eqnarray*}
       (3) \quad x &=& A^{-1}(f - By) \quad \text{from} \quad (1) \\
       (4) \quad y &=& D^{-1}(g - Cx) \quad \text{from} \quad (2) ,
    \end{eqnarray*}

    which amount to performing block Gaussian elimination on this system of equations.

    For the purpose of the current implementation, we choose to substitute (3) into (2)

    \begin{eqnarray*}
       C \: A^{-1}(f - By) + Dy &=& g \\
       -C \: A^{-1} \: By + Dy &=& g - C \: A^{-1} \: f \quad .
    \end{eqnarray*}

    This leads to the result

    \[
       (5) \quad (D - C\: A^{-1} \:B)y  = g - C \: A^{-1} f
           \quad \Rightarrow \quad Sy = g'
    \]

    with $ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

    So for any arbitrary vector $ a $, the Schur complement performs the following operation:

    \[
       (6) \quad Sa = (D - C \: A^{-1} \: B)a
    \]

    A typical set of steps needed to solve a linear system (1),(2) would be:

    1. Define the inverse matrix A_inv (using inverse_operator()).
    2. Define the Schur complement $ S $ (using schur_complement()).
    3. Define the iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that both $ S $ and its preconditioner operate over the same space as $ D $.
    4. Perform the pre-processing step on the RHS of (5) using condense_schur_rhs():

       \[
          g' = g - C \: A^{-1} \: f
       \]

    5. Solve for $ y $ in (5):

       \[
          y = S^{-1} g'
       \]

    6. Perform the post-processing step from (3) using postprocess_schur_solution():

       \[
          x = A^{-1} (f - By)
       \]
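
    The six steps can be traced on a small dense system in which $ A,B,C,D $ are plain 2x2 matrices and all inverses are formed explicitly instead of via inverse_operator() (an illustrative standalone C++ sketch; the numeric values are made up):

    #include <array>
    #include <cstdio>

    using Mat = std::array<std::array<double, 2>, 2>;
    using Vec = std::array<double, 2>;

    Mat inverse(const Mat &m)
    {
      const double det = m[0][0] * m[1][1] - m[0][1] * m[1][0];
      return {{{m[1][1] / det, -m[0][1] / det}, {-m[1][0] / det, m[0][0] / det}}};
    }
    Mat mul(const Mat &a, const Mat &b)
    {
      Mat c{};
      for (int i = 0; i < 2; ++i)
        for (int j = 0; j < 2; ++j)
          for (int k = 0; k < 2; ++k)
            c[i][j] += a[i][k] * b[k][j];
      return c;
    }
    Mat sub(const Mat &a, const Mat &b)
    {
      Mat c{};
      for (int i = 0; i < 2; ++i)
        for (int j = 0; j < 2; ++j)
          c[i][j] = a[i][j] - b[i][j];
      return c;
    }
    Vec apply(const Mat &m, const Vec &v)
    {
      return {m[0][0] * v[0] + m[0][1] * v[1], m[1][0] * v[0] + m[1][1] * v[1]};
    }
    Vec vsub(const Vec &a, const Vec &b) { return {a[0] - b[0], a[1] - b[1]}; }

    int main()
    {
      const Mat A = {{{4, 1}, {1, 3}}}, B = {{{1, 0}, {0, 1}}},
                C = {{{1, 0}, {0, 1}}}, D = {{{2, 0}, {0, 2}}};
      const Vec f = {1, 2}, g = {3, 4};

      const Mat A_inv = inverse(A);
      const Mat S     = sub(D, mul(C, mul(A_inv, B)));      // S  = D - C A^{-1} B
      const Vec g_p   = vsub(g, apply(C, apply(A_inv, f))); // g' = g - C A^{-1} f
      const Vec y     = apply(inverse(S), g_p);             // solve S y = g'
      const Vec x     = apply(A_inv, vsub(f, apply(B, y))); // x = A^{-1}(f - B y)

      std::printf("x = (%g, %g), y = (%g, %g)\n", x[0], x[1], y[0], y[1]);
    }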

    @@ -1642,10 +1642,10 @@
    LinearOperator< Domain, Range, BlockPayload::BlockType > inverse_operator(const LinearOperator< Range, Domain, BlockPayload::BlockType > &op, Solver &solver, const Preconditioner &preconditioner)
    PackagedOperation< Domain_1 > postprocess_schur_solution(const LinearOperator< Range_1, Domain_1, Payload > &A_inv, const LinearOperator< Range_1, Domain_2, Payload > &B, const Domain_2 &y, const Range_1 &f)
    In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

    A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1} $ is derived from the mass matrix over this space.

    From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein the expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

    // Construction of approximate inverse of Schur complement
    const auto A_inv_approx = linear_operator(preconditioner_A);
    const auto S_approx = schur_complement(A_inv_approx,B,C,D);
    @@ -1668,8 +1668,8 @@
    // Solve for y
    y = S_inv * rhs;
    x = postprocess_schur_solution (A_inv,B,y,f);
    Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of a IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $ \text{prec}(D) $, should ideally be computationally inexpensive.

    However, if an iterative solver based on IterationNumberControl is used as a preconditioner then the preconditioning operation is not a linear operation. Here a flexible solver like SolverFGMRES (flexible GMRES) is best employed as an outer solver in order to deal with the variable behavior of the preconditioner. Otherwise the iterative solver can stagnate somewhere near the tolerance of the preconditioner or generally behave erratically. Alternatively, using a ReductionControl would ensure that the preconditioner always solves to the same tolerance, thereby rendering its behavior constant.

    Further examples of this functionality can be found in the test-suite, such as tests/lac/schur_complement_01.cc. The solution of a multi-component problem (namely step-22) using the schur_complement can be found in tests/lac/schur_complement_03.cc.

    See also
    Block (linear algebra)
    @@ -1692,7 +1692,7 @@
    Return the number of blocks in a column (i.e., the number of "block rows", or the number $m$, if interpreted as a $m\times n$ block system).

    Definition at line 297 of file block_linear_operator.h.

    @@ -1711,7 +1711,7 @@
    Return the number of blocks in a row (i.e., the number of "block columns", or the number $n$, if interpreted as a $m\times n$ block system).

    Definition at line 303 of file block_linear_operator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 2024-01-30 03:04:28.468681595 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 2024-01-30 03:04:28.468681595 +0000 @@ -728,7 +728,7 @@
    Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    @@ -748,7 +748,7 @@
    Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    @@ -1296,7 +1296,7 @@ const BlockVectorType & src

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -1385,7 +1385,7 @@ const BlockVectorType & v

    Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -1744,7 +1744,7 @@
    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -1868,7 +1868,7 @@
    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 2024-01-30 03:04:28.532682128 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 2024-01-30 03:04:28.532682128 +0000 @@ -941,7 +941,7 @@
    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 396 of file block_sparse_matrix.h.

    @@ -1069,7 +1069,7 @@
    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 440 of file block_sparse_matrix.h.

    @@ -1395,7 +1395,7 @@
    Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    @@ -1421,7 +1421,7 @@
    Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    @@ -2061,7 +2061,7 @@
    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -2166,7 +2166,7 @@
    Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -2609,7 +2609,7 @@
    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -2717,7 +2717,7 @@
    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 2024-01-30 03:04:28.568682428 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 2024-01-30 03:04:28.568682428 +0000 @@ -754,7 +754,7 @@ const BlockVector< somenumber > & src

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 371 of file block_sparse_matrix_ez.h.

    @@ -779,7 +779,7 @@ const BlockVector< somenumber > & src

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 409 of file block_sparse_matrix_ez.h.

    @@ -804,7 +804,7 @@ const BlockVector< somenumber > & src

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    Definition at line 391 of file block_sparse_matrix_ez.h.

    @@ -829,7 +829,7 @@ const BlockVector< somenumber > & src

    Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

    Definition at line 429 of file block_sparse_matrix_ez.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 2024-01-30 03:04:28.624682894 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 2024-01-30 03:04:28.624682894 +0000 @@ -1768,7 +1768,7 @@
    Return the square of the $l_2$-norm.

    @@ -1820,7 +1820,7 @@
    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1846,7 +1846,7 @@
    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1872,7 +1872,7 @@
    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.
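
    The quantities above, sketched on a plain std::vector<double> (illustrative only, not the BlockVector implementation):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main()
    {
      const std::vector<double> v = {3.0, -4.0, 1.0};

      double l1 = 0, l2_sq = 0, linfty = 0;
      for (const double x : v)
        {
          l1 += std::fabs(x);                      // sum of absolute values
          l2_sq += x * x;                          // square of the l2-norm
          linfty = std::max(linfty, std::fabs(x)); // maximum absolute value
        }
      std::printf("l1 = %g, l2 = %g, linfty = %g\n", l1, std::sqrt(l2_sq), linfty);
    }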

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 2024-01-30 03:04:28.676683327 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 2024-01-30 03:04:28.676683327 +0000 @@ -1218,7 +1218,7 @@
    Return the square of the $l_2$-norm.

    @@ -1258,7 +1258,7 @@
    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1278,7 +1278,7 @@
    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1298,7 +1298,7 @@
    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 2024-01-30 03:04:28.708683594 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 2024-01-30 03:04:28.708683594 +0000 @@ -166,11 +166,11 @@

    Detailed Description

    template<int spacedim, typename Number = double>
    class BoundingBox< spacedim, Number >

    A class that represents a box of arbitrary dimension spacedim and with sides parallel to the coordinate axes, that is, a region

    \[
      [x_0^L, x_0^U] \times ... \times [x_{spacedim-1}^L, x_{spacedim-1}^U],
    \]

    where $(x_0^L , ..., x_{spacedim-1}^L)$ and $(x_0^U , ..., x_{spacedim-1}^U)$ denote the two vertices (bottom left and top right) which are used to represent the box. The quantities $x_k^L$ and $x_k^U$ denote the "lower" and "upper" bounds of values that are within the box for each coordinate direction $k$.

    Geometrically, a bounding box is thus:

    Bounding boxes are, for example, useful in parallel distributed meshes to give a general description of the owners of each portion of the mesh. More generally, bounding boxes are often used to roughly describe a region of space in which an object is contained; if a candidate point is not within the bounding box (a test that is cheap to execute), then it is not necessary to perform an expensive test whether the candidate point is in fact inside the object itself. Bounding boxes are therefore often used as a first, cheap rejection test before more detailed checks. As such, bounding boxes serve many of the same purposes as the convex hull, for which it is also relatively straightforward to compute whether a point is inside or outside, though not quite as cheap as for the bounding box.

    Taking the cross section of a BoundingBox<spacedim> orthogonal to a given direction gives a box in one dimension lower: BoundingBox<spacedim - 1>. In 3d, the 2 coordinates of the cross section of BoundingBox<3> can be ordered in 2 different ways. That is, if we take the cross section orthogonal to the y direction we could either order a 3d-coordinate into a 2d-coordinate as $(x,z)$ or as $(z,x)$. This class uses the second convention, corresponding to the coordinates being ordered cyclically $x \rightarrow y \rightarrow z \rightarrow x \rightarrow ... $ To be precise, if we take a cross section:

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html 2024-01-30 03:04:28.736683827 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionIC.html 2024-01-30 03:04:28.736683827 +0000 @@ -480,7 +480,7 @@
    Orthogonal to | Cross section coordinates ordered as
    cuSPARSE description of the lower triangular matrix $L$.

    Definition at line 176 of file cuda_precondition.h.

    @@ -534,7 +534,7 @@
    Solve and analysis structure for the lower triangular matrix $L$.

    Definition at line 186 of file cuda_precondition.h.

    @@ -750,7 +750,7 @@
    Determine if level information should be generated for the lower triangular matrix $L$. This value can be modified through an AdditionalData object.

    Definition at line 233 of file cuda_precondition.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html 2024-01-30 03:04:28.760684028 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1PreconditionILU.html 2024-01-30 03:04:28.760684028 +0000 @@ -482,7 +482,7 @@
    cuSPARSE description of the lower triangular matrix $L$.

    Definition at line 388 of file cuda_precondition.h.

    @@ -563,7 +563,7 @@
    Solve and analysis structure for the lower triangular matrix $L$.

    Definition at line 403 of file cuda_precondition.h.

    @@ -779,7 +779,7 @@
    Determine if level information should be generated for the lower triangular matrix $L$. This value can be modified through an AdditionalData object.

    Definition at line 450 of file cuda_precondition.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 2024-01-30 03:04:28.800684361 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 2024-01-30 03:04:28.800684361 +0000 @@ -563,7 +563,7 @@
    Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    Definition at line 381 of file cuda_sparse_matrix.h.

    @@ -593,7 +593,7 @@
    Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    Definition at line 390 of file cuda_sparse_matrix.h.

    @@ -775,7 +775,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src

    Matrix-vector multiplication: let $dst = M \cdot src$ with $M$ being this matrix.

    Definition at line 512 of file cuda_sparse_matrix.cc.

    @@ -798,7 +798,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src

    Matrix-vector multiplication: let $dst = M^T \cdot src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 530 of file cuda_sparse_matrix.cc.

    @@ -821,7 +821,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src

    Adding matrix-vector multiplication. Add $M \cdot src$ on $dst$ with $M$ being this matrix.

    Definition at line 548 of file cuda_sparse_matrix.cc.

    @@ -844,7 +844,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src

    Adding matrix-vector multiplication. Add $M^T \cdot src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

    Definition at line 566 of file cuda_sparse_matrix.cc.

    @@ -866,7 +866,7 @@
    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g., in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    Definition at line 584 of file cuda_sparse_matrix.cc.

    @@ -890,7 +890,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & v

    Compute the matrix scalar product $\left(u,Mv\right)$.

    Definition at line 597 of file cuda_sparse_matrix.cc.

    @@ -918,8 +918,8 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & b

    Compute the residual of an equation $M \cdot x=b$, where the residual is defined to be $r=b-M \cdot x$. Write the residual into $dst$. The $l_2$ norm of the residual vector is returned.

    Source $x$ and destination $dst$ must not be the same vector.

    Definition at line 611 of file cuda_sparse_matrix.cc.

    @@ -941,8 +941,8 @@
    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e., $|Mv|_1\leq |M|_1 |v|_1$.

    Definition at line 626 of file cuda_sparse_matrix.cc.

    @@ -964,8 +964,8 @@
    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$, (max. sum of rows). This is the natural norm that is compatible to the $l_\infty$-norm of vectors, i.e., $|Mv|_\infty \leq |M|_\infty |v|_\infty$.

    Definition at line 645 of file cuda_sparse_matrix.cc.
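
    Both matrix norms can be sketched on a small dense matrix (illustrative only, not the CUDAWrappers implementation, which operates on sparse storage):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    // l1-norm: maximum absolute column sum; linfty-norm: maximum absolute row sum.
    void matrix_norms(const std::vector<std::vector<double>> &M,
                      double &l1, double &linfty)
    {
      const std::size_t m = M.size(), n = M.front().size();
      std::vector<double> col_sums(n, 0.0);
      linfty = 0.0;
      for (std::size_t i = 0; i < m; ++i)
        {
          double row_sum = 0.0;
          for (std::size_t j = 0; j < n; ++j)
            {
              row_sum += std::fabs(M[i][j]);
              col_sums[j] += std::fabs(M[i][j]);
            }
          linfty = std::max(linfty, row_sum);
        }
      l1 = *std::max_element(col_sums.begin(), col_sums.end());
    }

    int main()
    {
      double l1, linfty;
      matrix_norms({{1, -2}, {3, 4}}, l1, linfty);
      std::printf("l1 = %g, linfty = %g\n", l1, linfty); // 6 and 7
    }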

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html 2024-01-30 03:04:28.908685261 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html 2024-01-30 03:04:28.908685261 +0000 @@ -4149,8 +4149,8 @@
    This function computes a fast approximate transformation from the real to the unit cell by inversion of an affine approximation of the $d$-linear function from the reference $d$-dimensional cell.

    The affine approximation of the unit to real cell mapping is found by a least squares fit of an affine function to the $2^d$ vertices of the present object. For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping. Thus, this function will return a finite result for all given input points, even in cases where the actual transformation by an actual bi-/trilinear or higher order mapping might be singular. Besides only approximating the mapping from the vertex points, this function also ignores the attached manifold descriptions. The result is only exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

    For exact transformations to the unit cell, use Mapping::transform_real_to_unit_cell().

    Note
    If dim<spacedim we first project p onto the plane.
    @@ -4213,17 +4213,17 @@
    Return the barycenter (also called centroid) of the object. The barycenter for an object $K$ of dimension $d$ in $D$ space dimensions is given by the $D$-dimensional vector $\mathbf x_K$ defined by

    \[
       \mathbf x_K = \frac{1}{|K|} \int_K \mathbf x \; \textrm{d}x
    \]

    where the measure of the object is given by

    \[
       |K| = \int_K \mathbf 1 \; \textrm{d}x.
    \]

    This function assumes that $K$ is mapped by a $d$-linear function from the reference $d$-dimensional cell. Then the integrals above can be pulled back to the reference cell and evaluated exactly (if through lengthy and, compared to the center() function, expensive computations).

    Definition at line 1597 of file tria_accessor.cc.
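
    As a worked example of these formulas (not taken from the documentation itself): for a triangle the mapping from the reference cell is affine, so the two integrals can be evaluated exactly and reduce to the vertex average,

    \[
       \mathbf x_K = \frac{1}{|K|} \int_K \mathbf x \; \textrm{d}x
                   = \frac{\mathbf v_0 + \mathbf v_1 + \mathbf v_2}{3},
    \]

    so the triangle with vertices $(0,0)$, $(1,0)$, $(0,1)$ has measure $|K| = 1/2$ and barycenter $(1/3, 1/3)$.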

    /usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 2024-01-30 03:04:28.956685661 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 2024-01-30 03:04:28.956685661 +0000 @@ -206,37 +206,37 @@

    Detailed Description

    template<int dim, int spacedim = dim, int chartdim = dim>
    class ChartManifold< dim, spacedim, chartdim >

    This class describes mappings that can be expressed in terms of charts. Specifically, this class with its template arguments describes a chart of dimension chartdim, which is part of a Manifold<dim,spacedim> and is used in an object of type Triangulation<dim,spacedim>: It specializes a Manifold of dimension chartdim embedded in a manifold of dimension spacedim, for which you have explicit pull_back() and push_forward() transformations. Its use is explained in great detail in step-53.

    This is a helper class which is useful when you have an explicit map from a Euclidean space of dimension chartdim to a Euclidean space of dimension spacedim which represents your manifold, i.e., when your manifold $\mathcal{M}$ can be represented by a map

    \[ F: \mathcal{B} \subset R^{\text{chartdim}} \mapsto \mathcal{M} \subset R^{\text{spacedim}} \]

    (the push_forward() function) and that admits the inverse transformation

    \[ F^{-1}: \mathcal{M} \subset R^{\text{spacedim}} \mapsto \mathcal{B} \subset R^{\text{chartdim}} \]

    (the pull_back() function).

    The get_new_point() function of the ChartManifold class is implemented by calling the pull_back() method for all surrounding_points, computing their weighted average in the chartdim Euclidean space, and calling the push_forward() method with the resulting point, i.e.,

    \[ \mathbf x^{\text{new}} = F\left(\sum_i w_i F^{-1}(\mathbf x_i)\right). \]

    Derived classes are required to implement the push_forward() and the pull_back() methods. All other functions (with the exception of the push_forward_gradient() function, see below) that are required by mappings will then be provided by this class.

    Providing function gradients

    In order to compute vectors that are tangent to the manifold (for example, tangent to a surface embedded in higher dimensional space, or simply the three unit vectors of ${\mathbb R}^3$), one needs to also have access to the gradient of the push-forward function $F$. The gradient is the matrix $(\nabla F)_{ij}=\partial_j F_i$, where we take the derivative with regard to the chartdim reference coordinates on the flat Euclidean space in which $\mathcal B$ is located. In other words, at a point $\mathbf x$, $\nabla F(\mathbf x)$ is a matrix of size spacedim times chartdim.

    Only the ChartManifold::get_tangent_vector() function uses the gradient of the push-forward, but only a subset of all finite element codes actually require the computation of tangent vectors. Consequently, while derived classes need to implement the abstract virtual push_forward() and pull_back() functions of this class, they do not need to implement the virtual push_forward_gradient() function. Rather, that function has a default implementation (and consequently is not abstract, therefore not forcing derived classes to overload it), but the default implementation clearly cannot compute anything useful and therefore simply triggers an exception.

    A note on the template arguments

    The dimension arguments chartdim, dim and spacedim must satisfy the following relationships:

    dim <= spacedim
    chartdim <= spacedim

    However, there is no a priori relationship between dim and chartdim. For example, if you want to describe a mapping for an edge (a 1d object) in a 2d triangulation embedded in 3d space, you could do so by parameterizing it via a line

    \[
       F: [0,1] \rightarrow {\mathbb R}^3
    \]

    in which case chartdim is 1. On the other hand, there is no reason why one can't describe this as a mapping

    \[
       F: {\mathbb R}^3 \rightarrow {\mathbb R}^3
    \]

    in such a way that the line $[0,1]\times \{0\}\times \{0\}$ happens to be mapped onto the edge in question. Here, chartdim is 3. This may seem cumbersome but satisfies the requirements of an invertible function $F$ just fine as long as it is possible to get from the edge to the pull-back space and then back again. Finally, given that we are dealing with a 2d triangulation in 3d, one will often have a mapping from, say, the 2d unit square or unit disk to the domain in 3d space, and the edge in question may simply be the mapped edge of the unit domain in 2d space. In this case, chartdim is 2.

    Definition at line 902 of file manifold.h.
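
    A concrete chart with chartdim = spacedim = 2 is the polar-coordinate map on the punctured plane. The following standalone C++ sketch (free functions for illustration, not the class's virtual interface; it ignores the periodicity of the angle when averaging) shows the pull_back()/push_forward() pair and how a weighted average taken in chart space mirrors get_new_point():

    #include <array>
    #include <cmath>

    using Point2 = std::array<double, 2>;

    // F : (r, theta) -> (x, y), the push_forward of the chart.
    Point2 push_forward(const Point2 &chart_point)
    {
      const double r = chart_point[0], theta = chart_point[1];
      return {r * std::cos(theta), r * std::sin(theta)};
    }

    // F^{-1} : (x, y) -> (r, theta), the pull_back of the chart.
    Point2 pull_back(const Point2 &space_point)
    {
      const double x = space_point[0], y = space_point[1];
      return {std::hypot(x, y), std::atan2(y, x)};
    }

    // Pull both points back, average in chart coordinates, push forward again.
    Point2 get_new_point(const Point2 &a, const Point2 &b, double w)
    {
      const Point2 pa = pull_back(a), pb = pull_back(b);
      return push_forward({(1 - w) * pa[0] + w * pb[0],
                           (1 - w) * pa[1] + w * pb[1]});
    }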

    Member Typedef Documentation

    @@ -566,7 +566,7 @@
    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -600,7 +600,7 @@
    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    \begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
@@ -616,7 +616,7 @@
                                     -F^{-1}(\mathbf x_1)\right]\right).
 \end{align*}

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    \begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html	2024-01-30 03:04:29.012686127 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html	2024-01-30 03:04:29.012686127 +0000
@@ -772,7 +772,7 @@
    Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    @@ -792,7 +792,7 @@
    Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    @@ -1036,7 +1036,7 @@
    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by ChunkSparsityPattern::symmetrize().
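
    On a dense matrix the operation reads as follows (an illustrative standalone sketch; unlike the sparse case described above, a dense matrix has no missing entries, so the result is always symmetric):

    #include <vector>

    // Replace a square matrix by its symmetric part, A = (A + A^T) / 2,
    // looping only over the lower triangle as the documentation describes.
    void symmetrize(std::vector<std::vector<double>> &A)
    {
      for (std::size_t i = 0; i < A.size(); ++i)
        for (std::size_t j = 0; j < i; ++j)
          {
            const double mean = 0.5 * (A[i][j] + A[j][i]);
            A[i][j] = A[j][i] = mean;
          }
    }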

    @@ -1367,7 +1367,7 @@

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.
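    As a brief usage sketch (assumed names mass_matrix and solution; matrix_norm_square() is the member documented here):

    // (v, Mv): with M the mass matrix this is the squared L2 norm of the
    // finite element function whose nodal values are stored in `solution`.
    const double l2_norm_squared = mass_matrix.matrix_norm_square(solution);
    const double l2_norm         = std::sqrt(l2_norm_squared);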

    @@ -1392,7 +1392,7 @@

    Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -1441,8 +1441,8 @@

    Return the $l_1$-norm of the matrix, that is $\|M\|_1=\max_{j}\sum_{i} |M_{ij}|$ (maximum column sum). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $\|Mv\|_1\leq \|M\|_1 \|v\|_1$ (cf. Hämmerlin-Hoffmann: Numerische Mathematik).

    @@ -1462,8 +1462,8 @@

    Return the $l_\infty$-norm of the matrix, that is $\|M\|_\infty=\max_{i}\sum_{j} |M_{ij}|$ (maximum row sum). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $\|Mv\|_\infty \leq \|M\|_\infty \|v\|_\infty$ (cf. Hämmerlin-Hoffmann: Numerische Mathematik).

/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html	2024-01-30 03:04:29.056686494 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html	2024-01-30 03:04:29.056686494 +0000
@@ -1123,7 +1123,7 @@

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth an $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

    Definition at line 520 of file chunk_sparsity_pattern.cc.
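    For instance (a sketch with an assumed size n; constructor, add(), compress(), and bandwidth() are the documented ChunkSparsityPattern members), a tridiagonal pattern has bandwidth 1:

    ChunkSparsityPattern pattern(n, n, /*max_entries_per_row=*/3, /*chunk_size=*/1);
    for (unsigned int i = 0; i < n; ++i)
      {
        pattern.add(i, i);
        if (i > 0)
          pattern.add(i, i - 1);
        if (i + 1 < n)
          pattern.add(i, i + 1);
      }
    pattern.compress();
    // pattern.bandwidth() now returns 1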

/usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html	2024-01-30 03:04:29.100686860 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html	2024-01-30 03:04:29.100686860 +0000
@@ -594,7 +594,7 @@

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects the two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line connecting the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    \begin{align*}
      \zeta(t) &= \xi_1 + t (\xi_2-\xi_1)
    \end{align*}

    In image space, i.e., in the space in which we operate, this leads to the curve

    \begin{align*}
      \mathbf s(t) &= F\left(F^{-1}(\mathbf x_1)
                             + t\left[F^{-1}(\mathbf x_2)
                                      -F^{-1}(\mathbf x_1)\right]\right).
    \end{align*}

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    \begin{align*}
      \mathbf s'(0) &=
        \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                           + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right)
                    \right|_{t=0}
      \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
            \left[F^{-1}(\mathbf x_2) - F^{-1}(\mathbf x_1)\right].
    \end{align*}
/usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html	2024-01-30 03:04:29.144687227 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html	2024-01-30 03:04:29.144687227 +0000
@@ -413,7 +413,7 @@

    Compute the cylindrical coordinates $(r, \phi, \lambda)$ for the given space point, where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Implements ChartManifold< dim, spacedim, chartdim >.

    @@ -445,7 +445,7 @@

    Compute the Cartesian coordinates for a chart point given in cylindrical coordinates $(r, \phi, \lambda)$, where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Definition at line 1144 of file manifold_lib.cc.

    @@ -475,7 +475,7 @@

    Compute the derivatives of the mapping from cylindrical coordinates $(r, \phi, \lambda)$ to Cartesian coordinates, where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Definition at line 1164 of file manifold_lib.cc.
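    A short usage sketch (point values are made up; CylindricalManifold is the class documented here):

    #include <deal.II/grid/manifold_lib.h>

    using namespace dealii;

    // A cylindrical manifold around the x-axis (axis index 0).
    CylindricalManifold<3> cylinder(/*axis=*/0);

    const Point<3> p(1.0, 0.0, 1.0);                      // point in space
    const Point<3> chart = cylinder.pull_back(p);         // (r, phi, lambda)
    const Point<3> back  = cylinder.push_forward(chart);  // recovers p up to roundoff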

    @@ -644,7 +644,7 @@

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -678,7 +678,7 @@

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects the two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line connecting the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    \begin{align*}
      \zeta(t) &= \xi_1 + t (\xi_2-\xi_1)
    \end{align*}

    In image space, i.e., in the space in which we operate, this leads to the curve

    \begin{align*}
      \mathbf s(t) &= F\left(F^{-1}(\mathbf x_1)
                             + t\left[F^{-1}(\mathbf x_2)
                                      -F^{-1}(\mathbf x_1)\right]\right).
    \end{align*}

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    \begin{align*}
      \mathbf s'(0) &=
        \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                           + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right)
                    \right|_{t=0}
      \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
            \left[F^{-1}(\mathbf x_2) - F^{-1}(\mathbf x_1)\right].
    \end{align*}
/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html	2024-01-30 03:04:29.176687493 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html	2024-01-30 03:04:29.176687493 +0000
@@ -183,7 +183,7 @@
 As a consequence, DataOut is forced to take things apart into their real and imaginary parts, and both are output as separate quantities. This is the case for data that is written directly to a file by DataOut, but it is also the case for data that is first routed through DataPostprocessor objects (or objects of their derived classes): All these objects see is a collection of real values, even if the underlying solution vector was complex-valued.

    All of this has two implications:

    step-58 provides an example of how this class (or, rather, the derived DataPostprocessorScalar class) is used in a complex-valued situation.

/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html	2024-01-30 03:04:29.204687726 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html	2024-01-30 03:04:29.204687726 +0000
@@ -255,7 +255,7 @@

    These pictures show an ellipse representing the gradient tensor at, on average, every tenth mesh point. You may want to read through the documentation of the VisIt visualization program (see https://wci.llnl.gov/simulation/computer-codes/visit/) for an interpretation of how exactly tensors are visualized.

    In elasticity, one is often interested not in the gradient of the displacement, but in the "strain", i.e., the symmetrized version of the gradient $\varepsilon=\frac 12 (\nabla u + \nabla u^T)$. This is easily facilitated with the following minor modification:

    template <int dim>
    class StrainPostprocessor : public DataPostprocessorTensor<dim>
    {
    public:
      StrainPostprocessor() // sketch of a plausible completion of the cut-off snippet
        : DataPostprocessorTensor<dim>("strain", update_gradients)
      {}
    };
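    A usage sketch (dof_handler and solution are assumed to exist; attaching a postprocessor through DataOut::add_data_vector() is the standard deal.II pattern):

    StrainPostprocessor<dim> strain;

    DataOut<dim> data_out;
    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(solution, strain);
    data_out.build_patches();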
/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html	2024-01-30 03:04:29.224687893 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html	2024-01-30 03:04:29.224687893 +0000
@@ -235,7 +235,7 @@

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    Definition at line 492 of file derivative_approximation.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html	2024-01-30 03:04:29.240688027 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html	2024-01-30 03:04:29.244688060 +0000
@@ -230,7 +230,7 @@

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    Definition at line 631 of file derivative_approximation.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html	2024-01-30 03:04:29.272688293 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html	2024-01-30 03:04:29.272688293 +0000
@@ -453,8 +453,8 @@

    Compute the Frobenius norm of this form, i.e., the expression $\sqrt{\sum_{ij} |DF_{ij}|^2} = \sqrt{\sum_{ij} |\frac{\partial F_i}{\partial x_j}|^2}$.

    @@ -474,7 +474,7 @@

    Compute the volume element associated with the Jacobian of the transformation $\mathbf F$. That is to say, if $DF$ is square, it computes $\det(DF)$; in case $DF$ is not square, it returns $\sqrt{\det(DF^T \,DF)}$.

    @@ -494,9 +494,9 @@

    Assuming that the current object stores the Jacobian of a mapping $\mathbf F$, then the current function computes the covariant form of the derivative, namely $(\nabla \mathbf F) {\mathbf G}^{-1}$, where $\mathbf G = (\nabla \mathbf F)^{T}(\nabla \mathbf F)$. If $\nabla \mathbf F$ is a square matrix (i.e., $\mathbf F: {\mathbb R}^n \mapsto {\mathbb R}^n$), then this function simplifies to computing $\nabla {\mathbf F}^{-T}$.

    @@ -552,7 +552,7 @@

    Auxiliary function that computes $A T^{T}$ where A represents the current object.

    @@ -581,7 +581,7 @@

    One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$:

    \[
      \nabla \mathbf F(\mathbf x) \; \Delta \mathbf x
      \approx
      \mathbf F(\mathbf x + \Delta \mathbf x) - \mathbf F(\mathbf x).
    \]
@@ -715,7 +715,7 @@
    Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

    \[
      \mathbf u \cdot \mathbf A \mathbf v =
      \text{DF2}^{-1}(\mathbf u) \cdot \text{DF1}^{-1}(\mathbf v)
    \]
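    A small sketch (values are made up; DerivativeForm, apply_transformation(), and determinant() are the entities documented in this class):

    #include <deal.II/base/derivative_form.h>

    using namespace dealii;

    // Jacobian of a map F : R^2 -> R^3, i.e. a 3x2 matrix.
    DerivativeForm<1, 2, 3> DF;
    DF[0][0] = 1;  DF[0][1] = 0;
    DF[1][0] = 0;  DF[1][1] = 1;
    DF[2][0] = 2;  DF[2][1] = 3;

    Tensor<1, 2> dx;
    dx[0] = 0.1;
    dx[1] = -0.2;

    const Tensor<1, 3> dF  = apply_transformation(DF, dx); // ~ F(x+dx) - F(x)
    const double       vol = DF.determinant(); // sqrt(det(DF^T DF)), DF not square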
/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html	2024-01-30 03:04:29.316688660 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1CellLevelBase.html	2024-01-30 03:04:29.316688660 +0000
@@ -514,7 +514,7 @@

    Compute the value of the residual vector field $\mathbf{r}(\mathbf{X})$.

    Parameters
    @@ -552,9 +552,9 @@
    [out] residual: A Vector object with the value for each component of the vector field evaluated at the point defined by the independent variable values.

    Compute the gradient (first derivative) of the residual vector field with respect to all independent variables, i.e.

    \[
      \frac{\partial\mathbf{r}(\mathbf{X})}{\partial\mathbf{X}}
    \]

    Parameters
    @@ -1295,7 +1295,7 @@

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1336,7 +1336,7 @@
    [in] index: The index in the vector of independent variables.

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not, then ADOL-C, for example, won't throw an error, but it might complain nonsensically during later computations or produce garbage results.
    @@ -1411,7 +1411,7 @@

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1542,7 +1542,7 @@
    [out] out: An auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
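    Taken together, a sketch of the generic cell-level workflow (ad_helper stands for one of the concrete helpers derived from CellLevelBase, e.g. EnergyFunctional or ResidualLinearization; local_dof_values, cell_rhs, and cell_matrix are assumed to exist):

    // Mark the local solution values as the independent variables X.
    ad_helper.register_dof_values(local_dof_values);
    const auto &dof_values_ad = ad_helper.get_sensitive_dof_values();

    // ... assemble the dependent quantity (energy or residual) from
    //     dof_values_ad using AD number types ...

    // Extract r(X) and dr/dX into standard linear algebra objects.
    ad_helper.compute_residual(cell_rhs);
    ad_helper.compute_linearization(cell_matrix);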
/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html	2024-01-30 03:04:29.368689093 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html	2024-01-30 03:04:29.368689093 +0000
@@ -440,11 +440,11 @@

    The constructor for the class.

    Parameters
    [in] n_independent_variables: The number of independent variables that will be used in the definition of the functions whose sensitivities are to be computed. In the computation of $\Psi(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.

    Note
    There is only one dependent variable associated with the total energy attributed to the local finite element. That is to say, this class assumes that the (local) right hand side and matrix contribution is computed from the first and second derivatives of a scalar function $\Psi(\mathbf{X})$.

    Definition at line 793 of file ad_helpers.cc.

    @@ -495,7 +495,7 @@

    Register the definition of the total cell energy $\Psi(\mathbf{X})$.

    Parameters
    @@ -527,9 +527,9 @@
    [in] energy: A recorded function that defines the total cell energy. This represents the single dependent variable from which both the residual and its linearization are to be computed.

    Evaluation of the total scalar energy functional for a chosen set of degree of freedom values, i.e.

    \[
      \Psi(\mathbf{X}) \vert_{\mathbf{X}}
    \]

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Returns
    The value of the energy functional at the evaluation point corresponding to a chosen set of local degree of freedom values.
    @@ -562,12 +562,12 @@

    Evaluation of the residual for a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.

    \[
      \mathbf{r}(\mathbf{X}) =
      \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{X}}
      \Big\vert_{\mathbf{X}}
    \]

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Parameters
    @@ -607,13 +607,13 @@

    Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the Hessian (second derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.

    \[
      \frac{\partial\mathbf{r}(\mathbf{X})}{\partial\mathbf{X}}
        =
      \frac{\partial^{2}\Psi(\mathbf{X})}{\partial\mathbf{X}
      \otimes \partial\mathbf{X}} \Big\vert_{\mathbf{X}}
    \]

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Parameters
    @@ -1551,7 +1551,7 @@

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1592,7 +1592,7 @@
    [in] index: The index in the vector of independent variables.

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not, then ADOL-C, for example, won't throw an error, but it might complain nonsensically during later computations or produce garbage results.
    @@ -1667,7 +1667,7 @@

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1798,7 +1798,7 @@
    [out] out: An auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
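    A condensed assembly sketch in the style of the deal.II tutorials (names are placeholders; the AD number type code is a user choice):

    using ADHelper = Differentiation::AD::EnergyFunctional<
      Differentiation::AD::NumberTypes::sacado_dfad_dfad, double>;
    using ADNumberType = ADHelper::ad_type;

    ADHelper ad_helper(n_independent_variables);
    ad_helper.register_dof_values(local_dof_values); // the inputs X
    const std::vector<ADNumberType> &dof_values_ad =
      ad_helper.get_sensitive_dof_values();

    ADNumberType energy_ad = ADNumberType(0.0);
    // ... accumulate the total cell energy Psi(X) from dof_values_ad ...

    ad_helper.register_energy_functional(energy_ad);
    ad_helper.compute_residual(cell_rhs);         // dPsi/dX
    ad_helper.compute_linearization(cell_matrix); // d^2Psi/(dX dX)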
/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html	2024-01-30 03:04:29.408689426 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1HelperBase.html	2024-01-30 03:04:29.408689426 +0000
@@ -991,7 +991,7 @@

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1032,7 +1032,7 @@
    [in] index: The index in the vector of independent variables.

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not, then ADOL-C, for example, won't throw an error, but it might complain nonsensically during later computations or produce garbage results.
    @@ -1107,7 +1107,7 @@

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1238,7 +1238,7 @@
    [out] out: An auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html	2024-01-30 03:04:29.456689826 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1PointLevelFunctionsBase.html	2024-01-30 03:04:29.456689826 +0000
@@ -437,7 +437,7 @@

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -551,7 +551,7 @@
    [in] value: A field that defines a number of independent variables. When considering taped AD numbers with branching functions, to avoid potential issues with branch switching it may be a good idea to choose these values close or equal to those that will be later evaluated and differentiated around.

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -600,7 +600,7 @@
    [in] value: A field that defines the values of a number of independent variables.

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1353,7 +1353,7 @@
    [in] index: The index in the vector of independent variables.

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1394,7 +1394,7 @@
    [in] index: The index in the vector of independent variables.

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not, then ADOL-C, for example, won't throw an error, but it might complain nonsensically during later computations or produce garbage results.
    @@ -1469,7 +1469,7 @@

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1600,7 +1600,7 @@
    [out] out: An auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html	2024-01-30 03:04:29.508690259 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ResidualLinearization.html	2024-01-30 03:04:29.508690259 +0000
@@ -454,8 +454,8 @@

    The constructor for the class.

    Parameters
    [in] n_independent_variables: The number of independent variables that will be used in the definition of the functions whose sensitivities are to be computed. In the computation of $\mathbf{r}(\mathbf{X})$, this will be the number of inputs $\mathbf{X}$, i.e., the dimension of the domain space.
    [in] n_dependent_variables: The number of scalar functions to be defined that will have a sensitivity to the given independent variables. In the computation of $\mathbf{r}(\mathbf{X})$, this will be the number of outputs $\mathbf{r}$, i.e., the dimension of the image space.
    @@ -509,7 +509,7 @@

    Register the definition of the cell residual vector $\mathbf{r}(\mathbf{X})$.

    Parameters
    @@ -549,9 +549,9 @@
    [in] residual: A vector of recorded functions that defines the residual. The components of this vector represent the dependent variables.

    Evaluation of the residual for a chosen set of degree of freedom values. This corresponds to the computation of the residual vector, i.e.

    \[
      \mathbf{r}(\mathbf{X}) \vert_{\mathbf{X}}
    \]

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Parameters
    @@ -591,10 +591,10 @@

    Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the residual vector $\mathbf{r}$ with respect to all independent variables, i.e.

    \[
      \frac{\partial\mathbf{r}(\mathbf{X})}{\partial\mathbf{X}}
    \]

    The values at the evaluation point $\mathbf{X}$ are obtained by calling CellLevelBase::set_dof_values().

    Parameters
    @@ -1532,7 +1532,7 @@

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1573,7 +1573,7 @@
    [in] index: The index in the vector of independent variables.

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not, then ADOL-C, for example, won't throw an error, but it might complain nonsensically during later computations or produce garbage results.
    @@ -1648,7 +1648,7 @@

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -1779,7 +1779,7 @@
    [out] out: An auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
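    The analogous sketch for the residual-based helper (again with placeholder names):

    using ADHelper = Differentiation::AD::ResidualLinearization<
      Differentiation::AD::NumberTypes::sacado_dfad, double>;
    using ADNumberType = ADHelper::ad_type;

    ADHelper ad_helper(n_independent_variables, n_dependent_variables);
    ad_helper.register_dof_values(local_dof_values);
    const std::vector<ADNumberType> &dof_values_ad =
      ad_helper.get_sensitive_dof_values();

    std::vector<ADNumberType> residual_ad(n_dependent_variables,
                                          ADNumberType(0.0));
    // ... assemble the components of r(X) from dof_values_ad ...

    ad_helper.register_residual_vector(residual_ad);
    ad_helper.compute_residual(cell_rhs);         // r(X)
    ad_helper.compute_linearization(cell_matrix); // dr/dX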
/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html	2024-01-30 03:04:29.564690726 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html	2024-01-30 03:04:29.568690759 +0000
@@ -520,7 +520,7 @@

    Register the definition of the scalar field $\Psi(\mathbf{X})$.

    Parameters
    @@ -551,7 +551,7 @@
    [in] func: The recorded function that defines a dependent variable.

    Compute the value of the scalar field $\Psi(\mathbf{X})$ using the tape as opposed to executing the source code.

    Returns
    A scalar object with the value for the scalar field evaluated at the point defined by the independent variable values.

    Definition at line 1348 of file ad_helpers.cc.

    @@ -575,9 +575,9 @@

    Compute the gradient (first derivative) of the scalar field with respect to all independent variables, i.e.

    \[
      \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{X}}
    \]

    Parameters
    @@ -607,10 +607,10 @@

    Compute the Hessian (second derivative) of the scalar field with respect to all independent variables, i.e.

    \[
      \frac{\partial^{2}\Psi(\mathbf{X})}{\partial\mathbf{X} \otimes
      \partial\mathbf{X}}
    \]

    Parameters
    @@ -653,10 +653,10 @@

    Extract the function gradient for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    \[
      \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}}
    \]

    Parameters
    @@ -704,13 +704,13 @@

    Extract the function Hessian for a subset of independent variables $\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    \[
      \frac{\partial}{\partial\mathbf{B}} \left[
      \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}} \right] =
      \frac{\partial^{2}\Psi(\mathbf{X})}{\partial\mathbf{B} \otimes
      \partial\mathbf{A}}
    \]

    Parameters
    @@ -753,11 +753,11 @@

    Extract the function Hessian for a subset of independent variables $\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    \[
      \frac{\partial}{\partial\mathbf{B}} \left[
      \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}} \right]
    \]

    This function is a specialization of the above for rank-0 tensors (scalars). This corresponds to extracting a single entry of the Hessian matrix because both extractors imply selection of just a single row or column of the matrix.

    @@ -794,11 +794,11 @@

    Extract the function Hessian for a subset of independent variables $\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.

    \[
      \frac{\partial}{\partial\mathbf{B}} \left[
      \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}} \right]
    \]

    This function is a specialization of the above for rank-4 symmetric tensors.

    @@ -919,7 +919,7 @@

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -1064,7 +1064,7 @@
    [in] value: A field that defines a number of independent variables. When considering taped AD numbers with branching functions, to avoid potential issues with branch switching it may be a good idea to choose these values close or equal to those that will be later evaluated and differentiated around.

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -1113,7 +1113,7 @@
    [in] value: A field that defines the values of a number of independent variables.

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1155,7 +1155,7 @@
    [in] index: The index in the vector of independent variables.

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1907,7 +1907,7 @@
    [in] index: The index in the vector of independent variables.

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not, then ADOL-C, for example, won't throw an error, but it might complain nonsensically during later computations or produce garbage results.
    @@ -1982,7 +1982,7 @@

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -2113,7 +2113,7 @@
    [out] out: An auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
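    A point-level sketch of the scalar-function helper (placeholder names; the extractor-based calls are the members documented above, and trace(C_ad) is only a stand-in for a real energy):

    constexpr int dim = 3;

    using ADHelper = Differentiation::AD::ScalarFunction<
      dim, Differentiation::AD::NumberTypes::sacado_dfad_dfad, double>;
    using ADNumberType = ADHelper::ad_type;

    const FEValuesExtractors::SymmetricTensor<2> C_dofs(0);
    const SymmetricTensor<2, dim> C = unit_symmetric_tensor<dim>(); // sample input

    ADHelper ad_helper(SymmetricTensor<2, dim>::n_independent_components);
    ad_helper.register_independent_variable(C, C_dofs);
    const SymmetricTensor<2, dim, ADNumberType> C_ad =
      ad_helper.get_sensitive_variables(C_dofs);

    const ADNumberType psi_ad = trace(C_ad); // stand-in for Psi(C)
    ad_helper.register_dependent_variable(psi_ad);

    Vector<double>     Dpsi(ad_helper.n_independent_variables());
    FullMatrix<double> D2psi(ad_helper.n_independent_variables(),
                             ad_helper.n_independent_variables());
    ad_helper.compute_gradient(Dpsi);
    ad_helper.compute_hessian(D2psi);

    const SymmetricTensor<2, dim> dpsi_dC =
      ad_helper.extract_gradient_component(Dpsi, C_dofs);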
/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html	2024-01-30 03:04:29.624691226 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html	2024-01-30 03:04:29.628691259 +0000
@@ -524,7 +524,7 @@

    Register the definition of the vector field $\boldsymbol{\Psi}(\mathbf{X})$.

    Parameters
    @@ -558,7 +558,7 @@
    [in] funcs: A vector of recorded functions that defines the dependent variables.

    Register the definition of the vector field $\hat{\mathbf{g}}(\mathbf{X}) \subset \boldsymbol{\Psi}(\mathbf{X})$ that may represent a subset of the dependent variables.

    Parameters
    @@ -588,7 +588,7 @@
    [in] funcs: The recorded functions that define a set of dependent variables.

    Compute the value of the vector field $\boldsymbol{\Psi}(\mathbf{X})$.

    Parameters
    @@ -617,10 +617,10 @@
    [out] values: A Vector object with the value for each component of the vector field evaluated at the point defined by the independent variable values. The output values vector has a length corresponding to n_dependent_variables.

    Compute the Jacobian (first derivative) of the vector field with respect to all independent variables, i.e.

    \[
      \mathbf{J}(\boldsymbol{\Psi})
         = \frac{\partial\boldsymbol{\Psi}(\mathbf{X})}{\partial\mathbf{X}}
    \]

    Parameters
    @@ -663,7 +663,7 @@

    Extract the set of functions' values for a subset of dependent variables $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$.

    Parameters
    @@ -709,13 +709,13 @@
    [in] values: A Vector object with the value for each component of the vector field evaluated at the point defined by the independent variable values.

    Extract the Jacobian of the subset of dependent functions $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    \[
      \mathbf{J}(\mathbf{g})
         = \frac{\partial\mathbf{g}(\mathbf{X})}{\partial\mathbf{A}}
    \]

    The first index of the Jacobian matrix $\mathbf{J}(\mathbf{g})$ relates to the dependent variables, while the second index relates to the independent variables.

    Parameters
    @@ -757,11 +757,11 @@
    [in] jacobian: The Jacobian of the vector function with respect to all independent variables, i.e., that returned by compute_jacobian().

    Extract the Jacobian of the subset of dependent functions $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    \[
      \mathbf{J}(\mathbf{g})
         = \frac{\partial\mathbf{g}(\mathbf{X})}{\partial\mathbf{A}}
    \]

    This function is a specialization of the above for rank-0 tensors (scalars). This corresponds to extracting a single entry of the Jacobian matrix because both extractors imply selection of just a single row or column of the matrix.

    @@ -798,11 +798,11 @@

    Extract the Jacobian of the subset of dependent functions $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.

    \[
      \mathbf{J}(\mathbf{g})
         = \frac{\partial\mathbf{g}(\mathbf{X})}{\partial\mathbf{A}}
    \]

    This function is a specialization of the above for rank-4 symmetric tensors.

    @@ -923,7 +923,7 @@

    Register the subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -1068,7 +1068,7 @@
    [in] value: A field that defines a number of independent variables. When considering taped AD numbers with branching functions, to avoid potential issues with branch switching it may be a good idea to choose these values close or equal to those that will be later evaluated and differentiated around.

    Set the values for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$.

    Parameters
    @@ -1117,7 +1117,7 @@
    [in] value: A field that defines the values of a number of independent variables.

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1159,7 +1159,7 @@
    [in] index: The index in the vector of independent variables.

    Set the actual value of the independent variable $X_{i}$.

    Parameters
    @@ -1911,7 +1911,7 @@
    [in] index: The index in the vector of independent variables.

    Initialize an independent variable $X_{i}$ such that subsequent operations performed with it are tracked.

    Note
    Care must be taken to mark each independent variable only once.
    The order in which the independent variables are marked defines the order of all future internal operations. They must be manipulated in the same order as that in which they are first marked. If not, then ADOL-C, for example, won't throw an error, but it might complain nonsensically during later computations or produce garbage results.
    @@ -1986,7 +1986,7 @@

    Initialize an independent variable $X_{i}$.

    Parameters
    @@ -2117,7 +2117,7 @@
    [out] out: An auto-differentiable number that is ready for use in standard computations. The operations that are performed with it are not recorded on the tape, and so should only be used when not in recording mode.

    Register the definition of the index'th dependent variable $f(\mathbf{X})$.

    Parameters
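    And a corresponding sketch for the vector-function helper (placeholder names; the stand-in constitutive law sigma(eps) = 2 eps is made up for illustration):

    constexpr int dim = 3;

    using ADHelper = Differentiation::AD::VectorFunction<
      dim, Differentiation::AD::NumberTypes::sacado_dfad, double>;
    using ADNumberType = ADHelper::ad_type;

    const FEValuesExtractors::SymmetricTensor<2> eps_dofs(0); // input strain
    const FEValuesExtractors::SymmetricTensor<2> sig_dofs(0); // output stress

    const unsigned int n_vars = SymmetricTensor<2, dim>::n_independent_components;
    ADHelper ad_helper(n_vars, n_vars);

    const SymmetricTensor<2, dim> eps = unit_symmetric_tensor<dim>(); // sample
    ad_helper.register_independent_variable(eps, eps_dofs);
    const SymmetricTensor<2, dim, ADNumberType> eps_ad =
      ad_helper.get_sensitive_variables(eps_dofs);

    const SymmetricTensor<2, dim, ADNumberType> sig_ad = 2.0 * eps_ad;
    ad_helper.register_dependent_variables(sig_ad, sig_dofs);

    Vector<double>     values(ad_helper.n_dependent_variables());
    FullMatrix<double> jacobian(ad_helper.n_dependent_variables(),
                                ad_helper.n_independent_variables());
    ad_helper.compute_values(values);     // Psi(X)
    ad_helper.compute_jacobian(jacobian); // dPsi/dX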
/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html	2024-01-30 03:04:29.652691459 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html	2024-01-30 03:04:29.656691492 +0000
@@ -148,11 +148,11 @@

    Detailed Description

    This class provides a means to keep track of the simulation time in a time-dependent simulation. It manages stepping forward from a start time $T_{\text{start}}$ to an end time $T_{\text{end}}$. It also allows adjusting the time step size during the simulation. This class provides the necessary interface to be incorporated in any time-dependent simulation. The usage of this class is demonstrated in step-19 and step-21.

    This class provides a number of invariants that are guaranteed to be true at all times.

    • The current simulation time is within the closed interval between the start time and the end time ( $T_{\text{start}} \le t \le T_{\text{end}}$).
    • Whenever time is incremented, the step size is positive ( $dt > 0$). In other words, time advances in strictly ascending order ( $m < n \Leftrightarrow t_m < t_n$).

    The model this class follows is that one sets a desired time step length either through the constructor or using the set_desired_next_step_size() function. This step size will then be used in all following calls to the advance_time() function, but may be adjusted slightly towards the end of the simulation to ensure that the simulation time hits the end time exactly. The adjustment is useful for the following reasons:

    Let's say that you loop over all of the time steps by using a for loop

    for (DiscreteTime time(0., 1., 0.3);
         time.is_at_end() == false;
         time.advance_time())
      {
        // Insert simulation code here
      }
    In the above example the time starts at $T_{\text{start}} = 0$ until $T_{\text{end}}=1$. Assuming the time step $dt = 0.3$ is not modified inside the loop, the time is advanced from $t = 0$ to $t = 0.3$, $t = 0.6$, $t = 0.9$, and finally it reaches the end time at $t = 1.0$. Here, the final step size needs to be reduced from its desired value of 0.3 to $dt = 0.1$ in order to ensure that we finish the simulation exactly at the specified end time. In fact, you should assume that not only the last time step length may be adjusted, but also previous ones; for example, this class may take the liberty to spread the decrease in time step size out over several time steps and increment time from $t=0$, to $0.3$, $0.6$, $0.8$, and finally $t=T_{\text{end}}=1$ to avoid too large a change in time step size from one step to another.

    The other situation in which the time step needs to be adjusted (this time to slightly larger values) is if a time increment falls just short of the final time. Imagine, for example, a similar situation as above, but with different end time:

    for (DiscreteTime time(0., 1.21, 0.3);
         time.is_at_end() == false;
         time.advance_time())
      {
        // Insert simulation code here
      }
    Here, the time step from $t=0.9$ to $t=1.2$ falls just short of the final time $T_{\text{end}}=1.21$. Instead of following up with a very small step of length $dt=0.01$, the class stretches the last time step (or last time steps) slightly to reach the desired end time.

    The examples above make clear that the time step size given to this class is only a desired step size. You can query the actual time step size using the get_next_step_size() function.

    Details of time-stepping

    Since time is marched forward in a discrete manner in our simulations, we need to discuss how we increment time. During time stepping we enter two separate alternating regimes in every step.

    • The snapshot stage (the current stage, the consistent stage): In this part of the algorithm, we are at $t = t_n$ and all quantities of the simulation (displacements, strains, temperatures, etc.) are up-to-date for $t = t_n$. In this stage, current time refers to $t_n$, next time refers to $t_{n+1}$, and previous time refers to $t_{n-1}$. The other useful notational quantities are the next time step size $t_{n+1} - t_n$ and the previous time step size $t_n - t_{n-1}$. This stage is the right occasion to generate text output using print commands within the user's code. Post-processed outputs can also be prepared here, which can then later be viewed by visualization programs such as Tecplot, Paraview, and VisIt. Additionally, during the snapshot stage the code can assess the quality of the previous step and decide whether it wants to increase or decrease the time step size. The step size for the next time step can be modified here by calling set_desired_next_step_size().
    • The update stage (the transition stage, the inconsistent stage): In this section of the program, the internal state of the simulation is updated from $t_n$ to $t_{n+1}$. All of the variables need to be updated one by one: the step number is incremented, the time is incremented by $dt = t_{n+1} - t_n$, and time-integration algorithms are used to update the other simulation quantities. In the middle of this stage, some variables have been updated to $t_{n+1}$ while other variables still represent their value at $t_n$. Thus, we call this the inconsistent stage, and require that no post-processing output related to the state variables take place within it. The state variables, namely those related to time, the solution field, and any internal variables, are not synchronized until they have all been updated one by one. In general, the order of updating variables is arbitrary, but some care should be taken if there are interdependencies between them. For example, if some variable such as $x$ depends on the calculation of another variable such as $y$, then $y$ must be updated before $x$ can be updated.

      The question arises whether time should be incremented before updating state quantities. Multiple possibilities exist, depending on program and formulation requirements, and possibly the programmer's preferences:

      • Time is incremented before the rest of the updates. In this case, even though time is incremented to $t_{n+1}$, not all variables have been updated yet. During this update phase, $dt$ equals the previous time step size; "previous" refers to the $dt$ of the advance_time() call that was performed most recently. In the following example code, we assume that a and b are two state variables that need to be updated in this time step.
        time.advance_time();
        new_a = update_a(a, b, time.get_previous_step_size());
        b = update_b(a, b, time.get_previous_step_size());
        a = new_a;
        Here, the code starts in a consistent state, but once advance_time() is called, the time variable, a, and b are no longer consistent with each other until after the last statement. At that point, the variables are all consistent again.
      • Time is incremented from $t_n$ to $t_{n+1}$ after all variables have already been updated for $t_{n+1}$. During the update stage, $dt$ is denoted as the next time step size; "next" means that this $dt$ corresponds to the advance_time() call that will happen subsequently.
        new_a = update_a(a, b, time.get_next_step_size());
        b = update_b(a, b, time.get_next_step_size());
        a = new_a;
        time.advance_time();
      • Time is incremented in the middle of the other updates: in this case $dt$ corresponds to the next or the previous step size, depending on whether it is used before or after the call to advance_time().
        new_a = update_a(a, b, time.get_next_step_size());
        time.advance_time();
        b = update_b(a, b, time.get_previous_step_size());
        a = new_a;
        a = new_a;

    One thing to note is that, during the update phase, $dt$ is referred to as either the next or the previous time step size, depending on whether advance_time() has been called yet. The notion of a current time step size is ill-defined. In fact, in the update stage the definition of every variable depends on whether it has been updated yet or not, hence the name the inconsistent stage.

    The following code snippet shows the code sections for the snapshot stage and the update stage in the context of a complete time-dependent simulation. This code follows the coding conventions incorporated in the tutorial examples. Note that even though this example is written in the format of a for loop, it can equivalently be written as a while or do while loop (as shown in step-21).

    // pre-processing/setup stage {
    make_grid();
    setup_system();
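    // A minimal sketch (not the original snippet, the rest of which is not
    // shown here) of how such a loop can continue, using only the
    // DiscreteTime interface described above; assemble_system(), solve(),
    // and output_results() are placeholder user functions, not deal.II API.
    for (DiscreteTime time(0., 1., 0.1);
         time.is_at_end() == false;
         time.advance_time()) // } end pre-processing/setup stage
      {
        // snapshot stage: all quantities are consistent at t = t_n {
        output_results(time.get_current_time());
        // } end snapshot stage

        // update stage: advance the state variables from t_n to t_{n+1};
        // time itself is incremented by advance_time() in the loop header {
        assemble_system(time.get_current_time(), time.get_next_step_size());
        solve();
        // } end update stage
      }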

    Set the actual value of the next time step size. By calling this method, we are indicating that the next time advance_time() is called, time_step_size is to be used to advance the simulation time.

    Note
    The difference between set_next_step_size() and set_desired_next_step_size() is that the former uses the provided $dt$ exactly without any adjustment, but produces an error (in debug mode) if $dt$ is not in the acceptable range. Generally, set_desired_next_step_size() is the preferred method because it can adjust the $dt$ intelligently, based on $T_{\text{end}}$.
    Precondition
    $0 < dt \le T_{\text{end}} - t$.

    Definition at line 70 of file discrete_time.cc.
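    The contrast between the two setters can be seen in a small usage sketch (a minimal, self-contained example; the step values are illustrative):

    #include <deal.II/base/discrete_time.h>

    using namespace dealii;

    int main()
    {
      DiscreteTime time(/*start=*/0., /*end=*/1., /*desired step=*/0.3);

      // set_next_step_size() uses dt verbatim; in debug mode it asserts
      // that 0 < dt <= T_end - t.
      time.set_next_step_size(0.5);
      time.advance_time(); // t is now exactly 0.5

      // set_desired_next_step_size() may adjust dt so that the simulation
      // hits T_end exactly (here the remaining interval is 0.5).
      time.set_desired_next_step_size(0.4);
      time.advance_time(); // the actual step may differ from 0.4
    }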

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 2024-01-30 03:04:29.724692059 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 2024-01-30 03:04:29.724692059 +0000 @@ -417,7 +417,7 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    class DoFHandler< dim, spacedim >

    Given a triangulation and a description of a finite element, this class enumerates degrees of freedom on all vertices, edges, faces, and cells of the triangulation. As a result, it also provides a basis for a discrete space $V_h$ whose elements are finite element functions defined on each cell by a FiniteElement object. This class satisfies the MeshType concept requirements.

    It is first used in the step-2 tutorial program.

    For each 0d, 1d, 2d, and 3d subobject, this class stores a list of the indices of degrees of freedom defined on this DoFHandler. These indices refer to the unconstrained degrees of freedom, i.e. constrained degrees of freedom are numbered in the same way as unconstrained ones, and are only later eliminated. This leads to the fact that indices in global vectors and matrices also refer to all degrees of freedom and some kind of condensation is needed to restrict the systems of equations to the unconstrained degrees of freedom only. The actual layout of storage of the indices is described in the internal::DoFHandlerImplementation::DoFLevel class documentation.

    The class offers iterators to traverse all cells, in much the same way as the Triangulation class does. Using the begin() and end() functions (and companions, like begin_active()), one can obtain iterators to walk over cells, and query the degree of freedom structures as well as the triangulation data. These iterators are built on top of those of the Triangulation class, but offer the additional information on degrees of freedom functionality compared to pure triangulation iterators. The order in which dof iterators are presented by the ++ and -- operators is the same as that for the corresponding iterators traversing the triangulation on which this DoFHandler is constructed.
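    A minimal sketch of this traversal (GridGenerator::hyper_cube and FE_Q are used here only to make the example self-contained):

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/tria.h>

    #include <vector>

    using namespace dealii;

    int main()
    {
      Triangulation<2> tria;
      GridGenerator::hyper_cube(tria);
      tria.refine_global(2);

      const FE_Q<2> fe(1);
      DoFHandler<2> dof_handler(tria);
      dof_handler.distribute_dofs(fe);

      // Walk over active cells exactly as one would with Triangulation
      // iterators, but with access to the DoF indices of each cell.
      std::vector<types::global_dof_index> dof_indices(fe.dofs_per_cell);
      for (const auto &cell : dof_handler.active_cell_iterators())
        cell->get_dof_indices(dof_indices);
    }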


    Like many other classes in deal.II, the DoFHandler class can stream its contents to an archive using BOOST's serialization facilities. The data so stored can later be retrieved again from the archive to restore the contents of this object. This facility is frequently used to save the state of a program to disk for possible later resurrection, often in the context of checkpoint/restart strategies for long running computations or on computers that aren't very reliable (e.g. on very large clusters where individual nodes occasionally fail and then bring down an entire MPI job).

    The model for doing so is similar for the DoFHandler class as it is for the Triangulation class (see the section in the general documentation of that class). In particular, the load() function does not exactly restore the same state as was stored previously using the save() function. Rather, the function assumes that you load data into a DoFHandler object that is already associated with a triangulation that has a content that matches the one that was used when the data was saved. Likewise, the load() function assumes that the current object is already associated with a finite element object that matches the one that was associated with it when data was saved; the latter can be achieved by calling DoFHandler::distribute_dofs() using the same kind of finite element before re-loading data from the serialization archive.
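    A sketch of this save/load model (a minimal example, assuming Boost's text archives; the file name and surrounding setup are illustrative):

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_q.h>

    #include <boost/archive/text_iarchive.hpp>
    #include <boost/archive/text_oarchive.hpp>

    #include <fstream>

    void checkpoint(const dealii::DoFHandler<2> &dof_handler)
    {
      std::ofstream                 out("dof_handler.checkpoint");
      boost::archive::text_oarchive archive(out);
      dof_handler.save(archive, /*version=*/0);
    }

    void restart(dealii::DoFHandler<2> &dof_handler,
                 const dealii::FE_Q<2> &fe)
    {
      // The handler must already be associated with a matching
      // triangulation, and distribute_dofs() must have been called with
      // the same kind of finite element as when the data was saved.
      dof_handler.distribute_dofs(fe);
      std::ifstream                 in("dof_handler.checkpoint");
      boost::archive::text_iarchive archive(in);
      dof_handler.load(archive, /*version=*/0);
    }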

    hp-adaptive finite element methods


    Instead of only using one particular FiniteElement on all cells, this class also allows for an enumeration of degrees of freedom on different finite elements on every cell. To this end, one assigns an active_fe_index to every cell that indicates which element within a collection of finite elements (represented by an object of type hp::FECollection) is the one that lives on this cell. The class then enumerates the degrees of freedom associated with these finite elements on each cell of a triangulation and, if possible, identifies degrees of freedom at the interfaces of cells if they match. If neighboring cells have degrees of freedom along the common interface that do not immediately match (for example, if you have $Q_2$ and $Q_3$ elements meeting at a common face), then one needs to compute constraints to ensure that the resulting finite element space on the mesh remains conforming.

    The whole process of working with objects of this type is explained in step-27. Many of the algorithms this class implements are described in the hp-paper.

    Active FE indices and their behavior under mesh refinement

    The typical workflow for using this class is to create a mesh, assign an active FE index to every active cell, call DoFHandler::distribute_dofs(), and then assemble a linear system and solve a problem on this finite element space.


    Go through the triangulation and "distribute" the degrees of freedom needed for the given finite element. "Distributing" degrees of freedom involves allocating memory to store the indices on all entities on which degrees of freedom can be located (e.g., vertices, edges, faces, etc.) and to then enumerate all degrees of freedom. In other words, while the mesh and the finite element object by themselves simply define a finite element space $V_h$, the process of distributing degrees of freedom makes sure that there is a basis for this space and that the shape functions of this basis are enumerated in an indexable, predictable way.

    The exact order in which degrees of freedom on a mesh are ordered, i.e., the order in which basis functions of the finite element space are enumerated, is something that deal.II treats as an implementation detail. By and large, degrees of freedom are enumerated in the same order in which we traverse cells, but you should not rely on any specific numbering. In contrast, if you want a particular ordering, use the functions in namespace DoFRenumbering.

    This function is first discussed in the introduction to the step-2 tutorial program.

    Note
    This function makes a copy of the finite element given as argument, and stores it as a member variable, similarly to the above function set_fe().
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 2024-01-30 03:04:29.768692426 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 2024-01-30 03:04:29.768692426 +0000 @@ -1106,7 +1106,7 @@

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix.

    Definition at line 567 of file dynamic_sparsity_pattern.cc.
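    As a small illustration of this definition (a minimal sketch; the entries are arbitrary):

    #include <deal.II/lac/dynamic_sparsity_pattern.h>

    using namespace dealii;

    int main()
    {
      DynamicSparsityPattern dsp(5, 5);
      dsp.add(0, 3); // nonzero entry with |i-j| = 3
      dsp.add(2, 2); // diagonal entry, |i-j| = 0

      const auto b = dsp.bandwidth(); // b == 3
    }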

    /usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 2024-01-30 03:04:29.796692659 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 2024-01-30 03:04:29.796692659 +0000 @@ -191,7 +191,7 @@
    template<typename VectorType = Vector<double>>
    class EigenInverse< VectorType >

    Inverse iteration (Wielandt) for eigenvalue computations.

    This class implements an adaptive version of the inverse iteration by Wielandt.


    There are two choices for the stopping criterion: by default, the norm of the residual $A x - l x$ is computed. Since this might not converge to zero for non-symmetric matrices with non-trivial Jordan blocks, it can be replaced by checking the difference of successive eigenvalues. Use AdditionalData::use_residual for switching this option.

    Usually, the initial guess entering this method is updated after each step, replacing it with the new approximation of the eigenvalue. Using a parameter AdditionalData::relaxation between 0 and 1, this update can be damped. With relaxation parameter 0, no update is performed. This damping allows for slower adaptation of the shift value to make sure that the method converges to the eigenvalue closest to the initial guess. This can be aided by the parameter AdditionalData::start_adaption, which indicates the first iteration step in which the shift value should be adapted.

    Definition at line 129 of file eigen.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 2024-01-30 03:04:29.824692892 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 2024-01-30 03:04:29.824692892 +0000 @@ -190,7 +190,7 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    class EigenPower< VectorType >

    Power method (von Mises) for eigenvalue computations.


    This method determines the largest eigenvalue of a matrix by applying increasing powers of this matrix to a vector. If there is an eigenvalue $l$ with dominant absolute value, the iteration vectors will become aligned to its eigenspace and $Ax = lx$.

    A shift parameter allows shifting the spectrum, so it is also possible to compute the smallest eigenvalue.

    Convergence of this method is known to be slow.
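    As a usage sketch (a minimal, self-contained example; the 2x2 diagonal matrix is purely illustrative):

    #include <deal.II/lac/eigen.h>
    #include <deal.II/lac/full_matrix.h>
    #include <deal.II/lac/solver_control.h>
    #include <deal.II/lac/vector.h>
    #include <deal.II/lac/vector_memory.h>

    using namespace dealii;

    int main()
    {
      FullMatrix<double> A(2, 2);
      A(0, 0) = 2.;
      A(1, 1) = 5.; // eigenvalues are 2 and 5

      SolverControl                       control(1000, 1.e-10);
      GrowingVectorMemory<Vector<double>> memory;
      EigenPower<Vector<double>>          power(control, memory);

      double         eigenvalue = 0.;
      Vector<double> x(2);
      x = 1.; // any start vector not orthogonal to the dominant eigenspace

      // Repeatedly applies A until the dominant eigenvalue (here 5)
      // is approximated to the requested tolerance.
      power.solve(eigenvalue, A, x);
    }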

    /usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 2024-01-30 03:04:29.868693259 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 2024-01-30 03:04:29.868693259 +0000 @@ -223,16 +223,16 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    class EllipticalManifold< dim, spacedim >

    Elliptical manifold description derived from ChartManifold. More information on the elliptical coordinate system can be found on Wikipedia.


    This is based on the definition of elliptic coordinates $(u,v)$:

    \[
      \left\lbrace\begin{aligned}
      x &=  x_0 + c \cosh(u) \cos(v) \\
      y &=  y_0 + c \sinh(u) \sin(v)
      \end{aligned}\right.
    \]

    in which $(x_0,y_0)$ are the coordinates of the center of the Cartesian system.

    The current implementation uses coordinates $(c,v)$, instead of $(u,v)$, and fixes $u$ according to a given eccentricity. Therefore, this choice of coordinates generates an elliptical manifold characterized by a constant eccentricity: $e=\frac{1}{\cosh(u)}$, with $e\in\left]0,1\right[$.

    The constructor of this class will throw an exception if both dim and spacedim are different from two.

    This manifold can be used to produce hyper_shells with elliptical curvature. As an example, the test elliptical_manifold_01 produces the following triangulation:

    Parameters
    center	Center of the manifold.
    major_axis_direction	Direction of the major axis of the manifold.
    eccentricity	Eccentricity of the manifold $e\in\left]0,1\right[$.
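    A construction sketch using these parameters (a minimal example; the hyper_shell mesh and the eccentricity value are illustrative, and the elliptical_manifold_01 test sets up its mesh differently):

    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/manifold_lib.h>
    #include <deal.II/grid/tria.h>

    using namespace dealii;

    int main()
    {
      Triangulation<2> tria;
      GridGenerator::hyper_shell(tria, Point<2>(), 0.5, 1.0, 8);

      Tensor<1, 2> major_axis;
      major_axis[0] = 1.; // major axis along the x direction

      // The eccentricity must lie in ]0,1[; 0.5 is an arbitrary choice.
      const EllipticalManifold<2> manifold(Point<2>(), major_axis, 0.5);
      tria.set_all_manifold_ids(0);
      tria.set_manifold(0, manifold);
    }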

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.


    Return the periodicity associated with the submanifold.

    For $\text{dim}=2$ and $\text{spacedim}=2$, the first coordinate is non-periodic, while the second coordinate has a periodicity of $2\pi$.

    Definition at line 1244 of file manifold_lib.cc.


    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.


    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    \begin{align*}
      \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
      \\       &= F^{-1}(\mathbf x_1)
                  + t\left[F^{-1}(\mathbf x_2) - F^{-1}(\mathbf x_1)\right].
    \end{align*}

    In image space, i.e., in the space in which we operate, this leads to the curve

    \begin{align*}
      \mathbf s(t) &= F(\zeta(t))
      \\           &= F(\xi_1 +  t (\xi_2-\xi_1))
      \\           &= F\left(F^{-1}(\mathbf x_1)
                             + t\left[F^{-1}(\mathbf x_2)
                                      -F^{-1}(\mathbf x_1)\right]\right).
    \end{align*}


    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    \begin{align*}
      \mathbf s'(0) &=
        \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                                  + t\left[F^{-1}(\mathbf x_2)
                                           -F^{-1}(\mathbf x_1)\right]\right)
                    \right|_{t=0}
      \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
            \left[F^{-1}(\mathbf x_2) - F^{-1}(\mathbf x_1)\right].
    \end{align*}
/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html	2024-01-30 03:04:29.984694225 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html	2024-01-30 03:04:29.988694258 +0000

    Likewise, a gradient of the finite element solution represented by vector can be interpolated to the quadrature points by fe_eval.get_gradient(q). The combination of read_dof_values(), evaluate() and get_value() is similar to what FEValues::get_function_values or FEValues::get_function_gradients does, but it is in general much faster because it makes use of the tensor product, see the description of the evaluation routines below, and can do this operation for several cells at once through vectorization.
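    Schematically, this evaluation pattern looks as follows (a sketch: dim, fe_degree, matrix_free, vector, and cell_range are assumed to be defined by the surrounding code):

    FEEvaluation<dim, fe_degree> fe_eval(matrix_free);
    for (unsigned int cell = cell_range.first;
         cell < cell_range.second;
         ++cell)
      {
        fe_eval.reinit(cell);
        fe_eval.read_dof_values(vector);
        fe_eval.evaluate(EvaluationFlags::values | EvaluationFlags::gradients);
        for (unsigned int q = 0; q < fe_eval.n_q_points; ++q)
          {
            const auto u_q      = fe_eval.get_value(q);    // value at x_q
            const auto grad_u_q = fe_eval.get_gradient(q); // gradient at x_q
          }
      }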


    The second class of tasks done by FEEvaluation are integration tasks for right hand sides. In finite element computations, these typically consist of multiplying a quantity on quadrature points (a function value, or a field interpolated by the finite element space itself) by a set of test functions and integrating over the cell through summation of the values in each quadrature point, multiplied by the quadrature weight and the Jacobian determinant of the transformation. If a generic Function object is given and we want to compute $v_i = \int_\Omega \varphi_i f dx$, this is done by the following cell-wise integration:

    Function<dim> &function = ...;
    for (unsigned int cell_index = cell_range.first;
         cell_index < cell_range.second;
         ++cell_index)
      {
        // ... (evaluate `function` at the quadrature points and integrate
        //      it against the test functions, as described above)
      }

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).
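    In code, the equivalence reads roughly as follows (a sketch: phi stands for an FEFaceEvaluation object on which evaluate(EvaluationFlags::gradients) has been called, and q is a quadrature point index):

    // Both lines compute the normal derivative at quadrature point q;
    // the first uses the more efficient internal data representation.
    const auto dudn      = phi.get_normal_derivative(q);
    const auto dudn_slow = phi.get_gradient(q) * phi.normal_vector(q);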

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.
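    Expressed as a sketch (phi is an FEEvaluation object, q a quadrature point index, and grad_unit a gradient with respect to the unit-cell coordinates; all names are illustrative):

    const auto J_inv_T   = phi.inverse_jacobian(q); // $J^{-T}$ at this point
    const auto grad_real = J_inv_T * grad_unit;     // covariant transformation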

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess.html 2024-01-30 03:04:30.080695025 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess.html 2024-01-30 03:04:30.084695058 +0000 @@ -1154,8 +1154,8 @@

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2024-01-30 03:04:30.172695792 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_011_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2024-01-30 03:04:30.172695792 +0000 @@ -940,8 +940,8 @@


    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2024-01-30 03:04:30.260696525 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_011_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2024-01-30 03:04:30.260696525 +0000 @@ -914,8 +914,8 @@


    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2024-01-30 03:04:30.344697224 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationAccess_3_01dim_00_01dim_00_01Number_00_01is__face_00_01VectorizedArrayType_01_4.html 2024-01-30 03:04:30.344697224 +0000 @@ -860,7 +860,7 @@

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).


    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 2024-01-30 03:04:30.428697924 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 2024-01-30 03:04:30.428697924 +0000 @@ -1053,8 +1053,8 @@

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 2024-01-30 03:04:30.492698457 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 2024-01-30 03:04:30.492698457 +0000 @@ -768,8 +768,8 @@

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 2024-01-30 03:04:30.596699324 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 2024-01-30 03:04:30.596699324 +0000 @@ -1579,8 +1579,8 @@

    Return the derivative of a finite element function at quadrature point number q_point after a call to FEEvaluation::evaluate(EvaluationFlags::gradients) in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    Note
    The derived class FEEvaluationAccess overloads this operation with specializations for the scalar case (n_components == 1) and for the vector-valued case (n_components == dim).

    Return the curl of the vector field, $\nabla \times v$ after a call to evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim).

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 2024-01-30 03:04:30.696700157 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 2024-01-30 03:04:30.696700157 +0000 @@ -931,7 +931,7 @@

    If the shape function is vector-valued, then this returns the only non- zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    i	Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_point	Number of the quadrature point at which the function is to be evaluated.

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    i	Number of the shape function $\varphi_i$ to be evaluated.
    q_point	Number of the quadrature point at which the function is to be evaluated.
    component	Vector component to be evaluated.

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    i	Number of the shape function $\varphi_i$ to be evaluated.
    q_point	Number of the quadrature point at which the function is to be evaluated.

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] values: The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
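    As a concrete sketch of the calling convention (assuming a scalar element, an FEValues object named fe_values constructed with update_values, and a global solution vector named solution; the output vector is sized by the caller as required):

        std::vector<double> values(n_q_points);   // sized by the caller
        fe_values.reinit(cell);
        fe_values.get_function_values(solution, values);
        // values[q] now approximates u_h(x_q^K) at each quadrature point q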
    @@ -1259,7 +1259,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1431,16 +1431,16 @@
    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] gradients: The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
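    For instance, one might accumulate the cell contribution to the $H^1$ seminorm of $u_h$ from these gradients; a sketch assuming update_gradients | update_JxW_values, with illustrative names:

        std::vector<Tensor<1, dim>> gradients(n_q_points);
        fe_values.get_function_gradients(solution, gradients);

        double h1_seminorm_sq = 0;
        for (unsigned int q = 0; q < n_q_points; ++q)
          // |grad u_h(x_q)|^2 * dx
          h1_seminorm_sq += gradients[q] * gradients[q] * fe_values.JxW(q);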
    @@ -1476,7 +1476,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1595,11 +1595,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] hessians: The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
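    A minimal sketch of the scalar case (requires update_hessians; names are illustrative):

        std::vector<Tensor<2, dim>> hessians(n_q_points);
        fe_values.get_function_hessians(solution, hessians);
        // hessians[q][i][j] is the second derivative d^2 u_h / (dx_i dx_j)
        // at quadrature point q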
    @@ -1640,7 +1640,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1759,11 +1759,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] laplacians: The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
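    The trace identity stated above can be checked directly; a sketch in which both calls require update_hessians, and the tolerance is an arbitrary choice for roundoff:

        std::vector<double>         laplacians(n_q_points);
        std::vector<Tensor<2, dim>> hessians(n_q_points);
        fe_values.get_function_laplacians(solution, laplacians);
        fe_values.get_function_hessians(solution, hessians);
        for (unsigned int q = 0; q < n_q_points; ++q)
          // laplacians[q] == trace(hessians[q]) up to roundoff
          Assert(std::abs(laplacians[q] - trace(hessians[q])) < 1e-10,
                 ExcInternalError());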
    @@ -1801,7 +1801,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1964,11 +1964,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] third_derivatives: The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2009,7 +2009,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -2350,7 +2350,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_point-th unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
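    To make the interpretation of JxW() as the measure $dx$ concrete, here is a sketch that approximates the cell volume and the integral of a function over the cell. It assumes update_JxW_values | update_quadrature_points; f is a hypothetical callable taking a Point<dim> and returning a double.

        double cell_volume = 0;
        double integral_f  = 0;
        for (unsigned int q = 0; q < n_q_points; ++q)
          {
            cell_volume += fe_values.JxW(q);                              // sum of dx
            integral_f  += f(fe_values.quadrature_point(q)) * fe_values.JxW(q);
          }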
    @@ -2407,7 +2407,7 @@
    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
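    A hedged sketch of the relation between the Jacobian and JxW() for volume evaluations (assumes update_jacobians | update_JxW_values, and that quadrature is the Quadrature<dim> object fe_values was built with):

        for (unsigned int q = 0; q < n_q_points; ++q)
          {
            const DerivativeForm<1, dim, dim> &J = fe_values.jacobian(q);
            // det(J) times the reference weight reproduces the mapped weight
            const double JxW_check = J.determinant() * quadrature.weight(q);
            Assert(std::abs(JxW_check - fe_values.JxW(q)) <
                     1e-12 * fe_values.JxW(q),
                   ExcInternalError());
          }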
    @@ -2465,7 +2465,7 @@
    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2523,7 +2523,7 @@
    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2581,7 +2581,7 @@
    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 2024-01-30 03:04:30.792700957 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 2024-01-30 03:04:30.792700957 +0000 @@ -649,7 +649,7 @@

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), an exception of type ExcShapeFunctionNotPrimitive is thrown. In that case, use the shape_value_component() function.

    Parameters
    i: Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_point: Number of the quadrature point at which the function is to be evaluated.
    @@ -690,7 +690,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector-valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore mainly of interest if the shape function is not primitive, in which case it is necessary since the other function cannot be used.

    Parameters
    i: Number of the shape function $\varphi_i$ to be evaluated.
    q_point: Number of the quadrature point at which the function is to be evaluated.
    component: Vector component to be evaluated.
    @@ -729,7 +729,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    i: Number of the shape function $\varphi_i$ to be evaluated.
    q_point: Number of the quadrature point at which the function is to be evaluated.
    @@ -931,17 +931,17 @@
    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] values: The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -977,7 +977,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1149,16 +1149,16 @@
    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] gradients: The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1194,7 +1194,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1313,11 +1313,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] hessians: The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1358,7 +1358,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1477,11 +1477,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] laplacians: The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1519,7 +1519,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1682,11 +1682,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] third_derivatives: The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1727,7 +1727,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -2068,7 +2068,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_point-th unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
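    In the face case, JxW() is the surface element $ds$; for example, one can integrate the normal flux of a scalar field $u_h$ across a boundary face. A sketch assuming an FEFaceValues object built with update_gradients | update_normal_vectors | update_JxW_values; all names are illustrative.

        std::vector<Tensor<1, dim>> face_gradients(n_face_q_points);
        fe_face_values.reinit(cell, face_no);
        fe_face_values.get_function_gradients(solution, face_gradients);

        double flux = 0;
        for (unsigned int q = 0; q < n_face_q_points; ++q)
          // (grad u_h . n) * ds
          flux += face_gradients[q] * fe_face_values.normal_vector(q) *
                  fe_face_values.JxW(q);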
    @@ -2125,7 +2125,7 @@
    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2183,7 +2183,7 @@
    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2241,7 +2241,7 @@
    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2299,7 +2299,7 @@
    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 2024-01-30 03:04:30.856701491 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 2024-01-30 03:04:30.856701491 +0000 @@ -488,8 +488,8 @@
  • If the q_index and mapping_index arguments to this function are explicitly specified (rather than leaving them at their default values), then these indices will be used to select which element of the hp::QCollection and hp::MappingCollection passed to the constructor should serve as the quadrature and mapping to be used.
  • If one of these arguments is left at its default value, then the function will need to choose a quadrature and/or mapping that is appropriate for the two finite element spaces used on the two cells adjacent to the current interface. As the first choice, if the quadrature or mapping collection we are considering has only one element, then that is clearly the one that should be used.
  • If the quadrature or mapping collection has multiple elements, then we need to dig further. For quadrature objects, we can compare whether the two quadrature objects that correspond to the active_fe_index values of the two adjacent cells are identical (i.e., have quadrature points at the same locations, and have the same weights). If this is so, then it does not matter which one of the two we take, and we choose one or the other.
  • If this has still not helped, we try to find out which of the two finite element spaces on the two adjacent cells is "larger" (say, if you had used $Q_2$ and $Q_4$ elements on the two adjacent cells, then the $Q_4$ element is the larger one); the determination of which space is "larger" is made using the hp::FECollection::find_dominated_fe() function, which is not necessarily intended for this kind of query, but yields a result that serves just fine for our purposes here. We then operate on the assumption that the quadrature object associated with the "larger" of the two spaces is the appropriate one to use for the face that separates these two spaces.
    • If this function returns that one of the two elements in question is dominated by the other, then presumably it is the "larger" one, and we take the quadrature formula and mapping that correspond to this "larger" element. For example, for the $Q_2$ element mentioned above, one would generally use a QGauss(3) quadrature formula, whereas for the $Q_4$ element, one would use QGauss(5). To integrate jump and average terms on the interface between cells using these two elements, QGauss(5) is appropriate. Because, typically, people will order elements in the hp::FECollection in the same order as the quadrature and mapping objects in hp::QCollection and hp::MappingCollection, this function will use the index of the "larger" element in the hp::FECollection to also index into the hp::QCollection and hp::MappingCollection to retrieve quadrature and mapping objects appropriate for the current face.
    • There are cases where neither element dominates the other. For example, if one uses $Q_2\times Q_1$ and $Q_1\times Q_2$ elements on neighboring cells, neither of the two spaces dominates the other – or, in the context of the current function, neither space is "larger" than the other. In that case, there is no way for the current function to determine which quadrature and mapping objects associated with the two elements are the appropriate ones. If that happens, you will get an error – and the only way to avoid the error is to explicitly specify for these interfaces which quadrature and mapping objects you want to use, by providing non-default values for the q_index and mapping_index arguments to this function (see the sketch after this list).
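    A sketch of that last case, assuming the hp-style constructor and the trailing q_index argument of reinit() found in recent deal.II releases (fe_collection, cell, face_no, neighbor, and neighbor_face_no are placeholders; the exact reinit() parameter list should be checked against the installed version):

      // Two face quadratures, matching the two elements in fe_collection:
      hp::QCollection<dim - 1> face_quadratures(QGauss<dim - 1>(3),
                                                QGauss<dim - 1>(5));
      FEInterfaceValues<dim> fe_iv(fe_collection,
                                   face_quadratures,
                                   update_values | update_JxW_values);
      // Neither element dominates the other, so select the richer rule
      // (index 1) explicitly instead of relying on the default logic:
      fe_iv.reinit(cell, face_no, numbers::invalid_unsigned_int,
                   neighbor, neighbor_face_no, numbers::invalid_unsigned_int,
                   /*q_index=*/1);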
    @@ -825,7 +825,7 @@
  • Mapped quadrature weight. This value equals the mapped surface element times the weight of the quadrature point.

    You can think of the quantity returned by this function as the surface element $ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
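    As a small consistency check of this interpretation, integrating the constant 1 with these weights returns the measure of the interface (a sketch; fe_iv is an already reinit-ed FEInterfaceValues object as above, and n_quadrature_points is assumed to mirror the FEValues member of the same name):

      double measure = 0;
      for (unsigned int q = 0; q < fe_iv.n_quadrature_points; ++q)
        measure += 1.0 * fe_iv.JxW(q); // sum of mapped weights = integral of 1 ds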
    @@ -1097,9 +1097,9 @@

    Return the jump $\jump{u}=u_{\text{cell0}} - u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    Note that one can define the jump in different ways (the value "there" minus the value "here", or the other way around; both are used in the finite element literature). The definition here uses "value here minus value there", as seen from the first cell.

    If this is a boundary face (at_boundary() returns true), then $\jump{u}=u_{\text{cell0}}$, that is "the value here (minus zero)".

    Note
    The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
    @@ -1155,9 +1155,9 @@

    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} - \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla u}=\nabla u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
    @@ -1213,9 +1213,9 @@

    Return the jump in the Hessian $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}} - \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the derivative) of the shape function (singular)".
    @@ -1271,9 +1271,9 @@

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the derivative) of the shape function (singular)".
    @@ -1329,9 +1329,9 @@

    Return the average $\average{u}=\frac{1}{2}u_{\text{cell0}} + \frac{1}{2}u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    If this is a boundary face (at_boundary() returns true), then $\average{u}=u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
    @@ -1387,9 +1387,9 @@

    Return the average of the gradient $\average{\nabla u} = \frac{1}{2}\nabla u_{\text{cell0}} + \frac{1}{2} \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    If this is a boundary face (at_boundary() returns true), then $\average{\nabla u}=\nabla u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values for the gradient) of the shape function (singular)".
    @@ -1445,10 +1445,10 @@

    Return the average of the Hessian $\average{\nabla^2 u} = \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    If this is a boundary face (at_boundary() returns true), then $\average{\nabla^2 u}=\nabla^2 u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the average (singular) of the Hessians (plural: one or two possible values for the second derivatives) of the shape function (singular)".
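    Taken together, the jump, average, and JxW quantities documented above are the ingredients of typical DG interface terms. A minimal sketch of a penalty term (assuming the jump_in_shape_values() spelling of recent deal.II releases; older ones used jump(); penalty is a user-chosen constant and fe_iv an already reinit-ed FEInterfaceValues object):

      FullMatrix<double> face_matrix(fe_iv.n_current_interface_dofs(),
                                     fe_iv.n_current_interface_dofs());
      for (unsigned int q = 0; q < fe_iv.n_quadrature_points; ++q)
        for (unsigned int i = 0; i < fe_iv.n_current_interface_dofs(); ++i)
          for (unsigned int j = 0; j < fe_iv.n_current_interface_dofs(); ++j)
            face_matrix(i, j) += penalty *
                                 fe_iv.jump_in_shape_values(i, q) * // [phi_i]
                                 fe_iv.jump_in_shape_values(j, q) * // [phi_j]
                                 fe_iv.JxW(q);                      // ds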
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 2024-01-30 03:04:30.904701891 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 2024-01-30 03:04:30.904701891 +0000 @@ -454,7 +454,7 @@

    Return the jump $\jump{u}=u_1 - u_2$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
    @@ -500,7 +500,7 @@

    Return the jump of the gradient $\jump{\nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
    @@ -546,8 +546,8 @@

    Return the jump in the Hessian $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}} - \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the second derivative) of the shape function (singular)".
    @@ -593,8 +593,8 @@

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the third derivative) of the shape function (singular)".
    @@ -640,7 +640,7 @@

    Return the average value $\average{u}=\frac{1}{2}(u_1 + u_2)$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
    @@ -708,7 +708,7 @@

    Return the average of the gradient $\average{\nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values of the derivative) of the shape function (singular)".
    @@ -754,9 +754,9 @@

    Return the average of the Hessian $\average{\nabla^2 u} = \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) of the Hessians (plural: one or two possible values of the second derivative) of the shape function (singular)".
    @@ -811,7 +811,7 @@

    Return the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The argument here_or_there selects between the value on cell 0 (here, true) and cell 1 (there, false). You can also interpret it as "upstream" (true) and "downstream" (false) as defined by the direction of the normal vector in this quadrature point. If here_or_there is true, the values from the first cell of the interface are used.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -878,7 +878,7 @@

    Return the jump in the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -926,7 +926,7 @@

    Return the jump in the gradients of the selected scalar components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -974,7 +974,7 @@

    Return the jump in the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1022,7 +1022,7 @@

    Return the jump in the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1070,7 +1070,7 @@

    Return the average of the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1118,7 +1118,7 @@

    Return the average of the gradients of the selected scalar components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1166,7 +1166,7 @@

    Return the average of the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
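    In user code, these per-component evaluations are reached through an extractor-based view; a sketch (solution is the global solution vector, fe_iv an already reinit-ed FEInterfaceValues object, and get_jump_in_function_values() follows the naming of recent releases):

      const FEValuesExtractors::Scalar pressure(0); // first component
      std::vector<double> pressure_jumps(fe_iv.n_quadrature_points);
      // Jump in the pressure component of `solution` at the interface
      // quadrature points:
      fe_iv[pressure].get_jump_in_function_values(solution, pressure_jumps);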
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2024-01-30 03:04:30.952702290 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2024-01-30 03:04:30.952702290 +0000 @@ -455,7 +455,7 @@

    Return the jump vector $[\mathbf{u}]=\mathbf{u_1} - \mathbf{u_2}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
    @@ -501,8 +501,8 @@

    Return the jump of the gradient (a tensor of rank 2) $\jump{\nabla \mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
    @@ -548,8 +548,8 @@

    Return the jump in the Hessian $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}} - \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the second derivative) of the shape function (singular)".
    @@ -595,8 +595,8 @@

    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the third derivative) of the shape function (singular)".
    @@ -642,8 +642,8 @@

    Return the average vector $\average{\mathbf{u}}=\frac{1}{2}(\mathbf{u_1} + \mathbf{u_2})$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
    @@ -689,8 +689,8 @@

    Return the average of the gradient (a tensor of rank 2) $\average{\nabla \mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values of the derivative) of the shape function (singular)".
    @@ -736,9 +736,9 @@

    Return the average of the Hessian $\average{\nabla^2 u} = \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) of the Hessians (plural: one or two possible values of the second derivative) of the shape function (singular)".
    @@ -796,7 +796,7 @@

    Return the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The argument here_or_there selects between the value on cell 0 (here, true) and cell 1 (there, false). You can also interpret it as "upstream" (true) and "downstream" (false) as defined by the direction of the normal vector in this quadrature point. If here_or_there is true, the values from the first cell of the interface are used.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -869,7 +869,7 @@

    Return the jump in the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -923,7 +923,7 @@

    Return the jump in the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -977,7 +977,7 @@

    Return the jump in the Hessians of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1031,7 +1031,7 @@

    Return the jump in the third derivatives of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1085,7 +1085,7 @@

    Return the average of the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1139,7 +1139,7 @@

    Return the average of the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1193,7 +1193,7 @@

    Return the average of the Hessians of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
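    The vector-valued view is used the same way; a sketch (again with placeholder names, and assuming the recent get_jump_in_function_values() spelling):

      const FEValuesExtractors::Vector velocities(0); // components 0..dim-1
      std::vector<Tensor<1, dim>> velocity_jumps(fe_iv.n_quadrature_points);
      // Jump [u] = u_1 - u_2 of the velocity part of `solution` at the
      // interface quadrature points:
      fe_iv[velocities].get_jump_in_function_values(solution, velocity_jumps);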
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 2024-01-30 03:04:30.984702557 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 2024-01-30 03:04:30.984702557 +0000 @@ -199,25 +199,25 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    class FESeries::Fourier< dim, spacedim >

    A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into Fourier series on a reference element. The exponential form of the Fourier series is based on completeness and Hermitian orthogonality of the set of exponential functions $ \phi_{\bf k}({\bf x}) = \exp(2 \pi i\, {\bf k} \cdot {\bf x})$. For example in 1d the L2-orthogonality condition reads

    \[
      \int_0^1 \phi_k(x) \phi_l^\ast(x) dx=\delta_{kl}.
    \]

    Note that $ \phi_{\bf k} = \phi_{-\bf k}^\ast $.

    The arbitrary scalar FE field on the reference element can be expanded in the complete orthogonal exponential basis as

    \[
      u({\bf x}) = \sum_{\bf k} c_{\bf k} \phi_{\bf k}({\bf x}).
    \]

    From the orthogonality property of the basis, it follows that

    \[
      c_{\bf k} = \int_{[0,1]^d} u({\bf x}) \phi_{\bf k}^\ast ({\bf x}) d{\bf x}\,.
    \]

    It is these complex-valued expansion coefficients that are calculated by this class. Note that $ u({\bf x}) = \sum_i u_i N_i({\bf x})$, where $ N_i({\bf x}) $ are real-valued FiniteElement shape functions. Consequently $ c_{\bf k} \equiv c_{-\bf k}^\ast $, and we only need to compute $ c_{\bf k} $ for positive indices $ \bf k $.

    Definition at line 90 of file fe_series.h.

    Member Typedef Documentation

    @@ -822,7 +822,7 @@
    Angular frequencies $ 2 \pi {\bf k} $.

    Definition at line 196 of file fe_series.h.
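    A sketch of how such an expansion is computed for one cell (dim fixed to 2 for concreteness; fe_collection, q_collection, local_dof_values, and cell are placeholders from the surrounding hp code, and SmoothnessEstimator::Fourier::default_fe_series() can build a suitably configured object in recent releases):

      // 5 Fourier modes per direction, one expansion setup per hp element:
      const std::vector<unsigned int> n_modes(fe_collection.size(), 5);
      FESeries::Fourier<2> fourier(n_modes, fe_collection, q_collection);

      Table<2, std::complex<double>> coefficients(5, 5); // c_k, k in [0,5)^2
      fourier.calculate(local_dof_values, cell->active_fe_index(), coefficients);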

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 2024-01-30 03:04:31.016702823 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 2024-01-30 03:04:31.016702823 +0000 @@ -196,39 +196,39 @@
    template<int dim, int spacedim = dim>
    class FESeries::Legendre< dim, spacedim >

    A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into series of Legendre functions on a reference element.

    Legendre functions are solutions to Legendre's differential equation

    \[
      \frac{d}{dx}\left([1-x^2] \frac{d}{dx} P_n(x)\right) + n[n+1] P_n(x) = 0
    \]

    and can be expressed using Rodrigues' formula

    \[
      P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n}[x^2-1]^n.
    \]

    These polynomials are orthogonal with respect to the $ L^2 $ inner product on the interval $ [-1;1] $

    \[
      \int_{-1}^1 P_m(x) P_n(x) dx = \frac{2}{2n + 1} \delta_{mn}
    \]

    and are complete. A family of $ L^2 $-orthogonal polynomials on $ [0;1] $ can be constructed via

    \[
      \widetilde P_m = \sqrt{2} P_m(2x-1).
    \]

    An arbitrary scalar FE field on the reference element $ [0;1] $ can be expanded in the complete orthogonal basis as

    \[
      u(x) = \sum_{m} c_m \widetilde P_{m}(x).
    \]

    From the orthogonality property of the basis, it follows that

    \[
      c_m = \frac{2m+1}{2} \int_0^1 u(x) \widetilde P_m(x) dx .
    \]

    This class calculates coefficients $ c_{\bf k} $ using $ dim $-dimensional Legendre polynomials constructed from $ \widetilde P_m(x) $ using the tensor product rule.

    Definition at line 260 of file fe_series.h.
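    The Legendre expansion is driven the same way (same placeholder names as in the Fourier sketch above; the coefficients are real-valued here, and SmoothnessEstimator::Legendre::default_fe_series() exists as a convenience in recent releases):

      const std::vector<unsigned int> n_modes(fe_collection.size(), 4);
      FESeries::Legendre<2> legendre(n_modes, fe_collection, q_collection);

      Table<2, double> coefficients(4, 4); // c_m for m in [0,4)^2
      legendre.calculate(local_dof_values, cell->active_fe_index(), coefficients);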

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 2024-01-30 03:04:31.112703623 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 2024-01-30 03:04:31.112703623 +0000 @@ -959,7 +959,7 @@

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then an exception of type ExcShapeFunctionNotPrimitive is thrown. In that case, use the shape_value_component() function.

    Parameters
    i: Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_point: Number of the quadrature point at which the function is to be evaluated
    @@ -1000,7 +1000,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    i: Number of the shape function $\varphi_i$ to be evaluated.
    q_point: Number of the quadrature point at which the function is to be evaluated.
    component: vector component to be evaluated.
    @@ -1039,7 +1039,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    i: Number of the shape function $\varphi_i$ to be evaluated.
    q_point: Number of the quadrature point at which the function is to be evaluated.
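    These two indices are exactly what one loops over when assembling local matrices; a standard sketch (cell_matrix, fe_values, and the enclosing cell loop are assumed from the surrounding program):

      // Local mass matrix: m(i,j) = sum_q phi_i(x_q) phi_j(x_q) JxW(q).
      for (const unsigned int q : fe_values.quadrature_point_indices())
        for (const unsigned int i : fe_values.dof_indices())
          for (const unsigned int j : fe_values.dof_indices())
            cell_matrix(i, j) += fe_values.shape_value(i, q) *
                                 fe_values.shape_value(j, q) *
                                 fe_values.JxW(q);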
    @@ -1241,17 +1241,17 @@
    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] values: The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.

    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
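
For illustration, here is a minimal usage sketch, assuming a scalar element fe, a DoFHandler dof_handler, and a global solution vector solution have already been set up (all names are placeholders, not part of the documented interface):

QGauss<dim>   quadrature(fe.degree + 1);
FEValues<dim> fe_values(fe, quadrature, update_values);
std::vector<double> values(quadrature.size());
for (const auto &cell : dof_handler.active_cell_iterators())
  {
    fe_values.reinit(cell);
    fe_values.get_function_values(solution, values);
    // values[q] now holds u_h(x_q^K) at quadrature point q of cell K
  }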
    @@ -1287,7 +1287,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.
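A corresponding sketch for this vector-valued variant, continuing the hypothetical setup above but now with a multi-component element: each entry of the output array is itself a Vector<double> with one entry per vector component.

std::vector<Vector<double>> values(quadrature.size(),
                                   Vector<double>(fe.n_components()));
fe_values.reinit(cell);
fe_values.get_function_values(solution, values);
// values[q](c) is the value of vector component c at quadrature point q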

    @@ -1459,16 +1459,16 @@

Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
[in] fe_function — A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] gradients — The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of the shape functions by the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
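
Analogously to get_function_values(), a minimal sketch with the same hypothetical names, assuming the FEValues object was constructed with the update_gradients flag:

std::vector<Tensor<1, dim>> gradients(quadrature.size());
fe_values.reinit(cell);
fe_values.get_function_gradients(solution, gradients);
// gradients[q][d] is the derivative in coordinate direction d at point q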
    @@ -1504,7 +1504,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1623,11 +1623,11 @@
    Parameters
[in] fe_function — A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] hessians — The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of the shape functions by the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
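
A corresponding sketch, again with hypothetical names and assuming update_hessians was passed to the constructor:

std::vector<Tensor<2, dim>> hessians(quadrature.size());
fe_values.reinit(cell);
fe_values.get_function_hessians(solution, hessians);
// hessians[q][i][j] is the (i,j)th second derivative at quadrature point q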
    @@ -1668,7 +1668,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1787,11 +1787,11 @@
    Parameters
[in] fe_function — A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] laplacians — The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of the shape functions by the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
Postcondition
laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
For each component of the output vector, laplacians[q]=trace(hessians[q]) holds, where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
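A sketch for the scalar case (hypothetical names as before; since the Laplacian is the trace of the Hessian, the FEValues object needs the update_hessians flag):

std::vector<double> laplacians(quadrature.size());
fe_values.reinit(cell);
fe_values.get_function_laplacians(solution, laplacians);
// laplacians[q] equals trace(hessians[q]) at quadrature point q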
    @@ -1829,7 +1829,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
For each component of the output vector, laplacians[q][c]=trace(hessians[q][c]) holds, where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1992,11 +1992,11 @@
    Parameters
[in] fe_function — A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] third_derivatives — The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of the shape functions by the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
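The same pattern, assuming update_3rd_derivatives was passed to the constructor (hypothetical names as before):

std::vector<Tensor<3, dim>> third_derivatives(quadrature.size());
fe_values.reinit(cell);
fe_values.get_function_third_derivatives(solution, third_derivatives);
// third_derivatives[q][i][j][k] is one entry of the third-order tensor at point q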
    @@ -2037,7 +2037,7 @@

This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -2378,7 +2378,7 @@

Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobian determinant times the weight of the q_point-th unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.


You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
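Its standard use is as the integration weight in quadrature loops; a minimal sketch (hypothetical names) approximating $\int_K u_h\,dx$ on the current cell:

double integral = 0;
for (unsigned int q = 0; q < quadrature.size(); ++q)
  integral += values[q] * fe_values.JxW(q); // sum over q of u_h(x_q) times w_q |det J|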
    @@ -2435,7 +2435,7 @@

Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2493,7 +2493,7 @@

Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2551,7 +2551,7 @@

Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2609,7 +2609,7 @@

Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 2024-01-30 03:04:31.264704889 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 2024-01-30 03:04:31.268704923 +0000 @@ -500,18 +500,18 @@

    Detailed Description

    template<int dim, int spacedim = dim>
class FESystem< dim, spacedim >

This class provides an interface to group several elements together into one, vector-valued element. As an example, consider the Taylor-Hood element that is used for the solution of the Stokes and Navier-Stokes equations: There, the velocity (of which there are as many components as the dimension $d$ of the domain) is discretized with $Q_2$ elements and the pressure with $Q_1$ elements. Mathematically, the finite element space for the coupled problem is then often written as $V_h = Q_2^d \times Q_1$ where the exponentiation is understood to be the tensor product of spaces – i.e., in 2d, we have $V_h=Q_2\times Q_2\times Q_1$ – and tensor products lead to vectors where each component of the vector-valued function space corresponds to a scalar function in one of the $Q_2$ or $Q_1$ spaces. Using the FESystem class, this space is created using

FESystem<dim> taylor_hood_fe (FE_Q<dim>(2)^dim, // velocity components
    FE_Q<dim>(1)); // pressure component

The creation of this element here corresponds to taking tensor-product powers of the $Q_2$ element in the first line of the list of arguments to the FESystem constructor, and then concatenation via another tensor product with the element in the second line. This kind of construction is used, for example, in the step-22 tutorial program.

Similarly, step-8 solves an elasticity equation where we need to solve for the displacement of a solid object. The displacement again has $d$ components if the domain is $d$-dimensional, and so the combined finite element is created using

FESystem<dim> displacement_fe (FE_Q<dim>(1)^dim);

    where now each (vector) component of the combined element corresponds to a $Q_1$ space.

To the outside world, FESystem objects look just like a usual finite element object; they just happen to be composed of several other finite elements that are possibly of different type. These "base elements" can themselves have multiple components and, in particular, could also be vector-valued – for example, if one of the base elements is an FESystem itself (see also below). An example is given in the documentation of namespace FETools::Compositing, when using the "tensor product" strategy.

    Vector valued elements are discussed in a number of tutorial programs, for example step-8, step-20, step-21, step-22, and in particular in the Handling vector valued problems module.

    Note
    The material presented here is also discussed in video lecture 19, video lecture 20. (All video lectures are also available here.)

    FESystem, components and blocks


An FESystem, except in the most trivial case, produces a vector-valued finite element with several components. The number of components n_components() corresponds to the dimension of the solution function in the PDE system, and correspondingly also to the number of equations your PDE system has. For example, the mixed Laplace system covered in step-20 has $d+1$ components in $d$ space dimensions: the scalar pressure and the $d$ components of the velocity vector. Similarly, the elasticity equation covered in step-8 has $d$ components in $d$ space dimensions. In general, the number of components of a FESystem element is the accumulated number of components of all base elements times their multiplicities. A bit more on components is also given in the glossary entry on components.

While the concept of components is important from the viewpoint of a partial differential equation, the finite element side looks a bit different, since not only FESystem but also vector-valued elements like FE_RaviartThomas have several components. The concept needed here is a block. Each block encompasses the set of degrees of freedom associated with a single base element of an FESystem, where base elements with multiplicities count multiple times. These blocks are usually addressed using the information in DoFHandler::block_info(). The number of blocks of a FESystem object is simply the sum of all multiplicities of base elements and is given by n_blocks().

    For example, the FESystem for the Taylor-Hood element for the three-dimensional Stokes problem can be built using the code

    const FE_Q<3> u(2);
    const FE_Q<3> p(1);
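As a hedged sketch of how these two base elements are typically combined, using the (base element, multiplicity) form of the FESystem constructor (not necessarily the literal continuation of the original snippet; the name taylor_hood_fe is a placeholder):

FESystem<3> taylor_hood_fe (u, 3,  // three copies of the Q2 velocity element
                            p, 1); // one copy of the Q1 pressure element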
    @@ -3836,7 +3836,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
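
A minimal sketch of such a conversion for a hypothetical Taylor-Hood element, where the pressure occupies component dim and, since every base-element copy forms its own block, also its own block:

FESystem<dim> fe (FE_Q<dim>(2), dim,  // dim velocity blocks
                  FE_Q<dim>(1), 1);   // one pressure block
const FEValuesExtractors::Scalar pressure(dim);
const BlockMask pressure_blocks = fe.block_mask(pressure);
// pressure_blocks has fe.n_blocks() == dim+1 entries; only the last is true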
    @@ -3944,7 +3944,7 @@
scalar — An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
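
A sketch of the ComponentMask-to-BlockMask conversion for the same hypothetical element as above; the dim velocity components map exactly onto the first dim blocks, so the conversion succeeds:

const ComponentMask velocity_components =
  fe.component_mask(FEValuesExtractors::Vector(0));
const BlockMask velocity_blocks = fe.block_mask(velocity_components);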
    @@ -4162,9 +4162,9 @@
component_mask — The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.


For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero on only one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
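
A sketch that queries this association for every degree of freedom of a hypothetical $Q_2$ element in 3d, comparing via GeometryPrimitive::get_dimension() (dimension 1 corresponds to a line):

const FE_Q<3> fe(2);
for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
  if (fe.get_associated_geometry_primitive(i).get_dimension() == 1)
    {
      // DoF i is logically associated with a line, e.g. an edge midpoint
    }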
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 2024-01-30 03:04:31.368705756 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 2024-01-30 03:04:31.364705722 +0000 @@ -743,7 +743,7 @@

If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then an exception of type ExcShapeFunctionNotPrimitive is thrown. In that case, use the shape_value_component() function.

    Parameters
[in] cell_dof_index — The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
i — Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
q_point — Number of the quadrature point at which the function is to be evaluated
    @@ -784,7 +784,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
i — Number of the shape function $\varphi_i$ to be evaluated.
q_point — Number of the quadrature point at which the function is to be evaluated.
component — Vector component to be evaluated.
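
A sketch for a non-primitive element such as FE_RaviartThomas, where shape_value() would throw ExcShapeFunctionNotPrimitive and one instead loops over components explicitly (hypothetical fe_values, fe, and quadrature as before):

for (unsigned int i = 0; i < fe_values.dofs_per_cell; ++i)
  for (unsigned int q = 0; q < quadrature.size(); ++q)
    for (unsigned int c = 0; c < fe.n_components(); ++c)
      {
        const double v = fe_values.shape_value_component(i, q, c);
        // v is component c of shape function i at quadrature point q
      }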
    @@ -823,7 +823,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
i — Number of the shape function $\varphi_i$ to be evaluated.
q_point — Number of the quadrature point at which the function is to be evaluated.
    @@ -1025,17 +1025,17 @@

Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
[in] fe_function — A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] values — The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of the shape functions by the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
Postcondition
values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1071,7 +1071,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.

    @@ -1243,16 +1243,16 @@

Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
[in] fe_function — A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] gradients — The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of the shape functions by the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1288,7 +1288,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1407,11 +1407,11 @@
    Parameters
[in] fe_function — A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] hessians — The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of the shape functions by the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1452,7 +1452,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1571,11 +1571,11 @@
    Parameters
[in] fe_function — A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] laplacians — The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of the shape functions by the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1613,7 +1613,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
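    The trace relation stated above can be checked directly. A sketch, assuming a scalar element and an fe_values object built with update_hessians on which reinit() has already been called; solution and quadrature are placeholders:

        std::vector<double>                 laplacians(quadrature.size());
        std::vector<dealii::Tensor<2, dim>> hessians(quadrature.size());

        fe_values.get_function_laplacians(solution, laplacians);
        fe_values.get_function_hessians(solution, hessians);

        for (unsigned int q = 0; q < quadrature.size(); ++q)
          // Up to roundoff, laplacians[q] equals trace(hessians[q]).
          Assert(std::abs(laplacians[q] - dealii::trace(hessians[q])) < 1e-10,
                 dealii::ExcInternalError());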
    @@ -1776,11 +1776,11 @@
    Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]  third_derivatives  The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1821,7 +1821,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.
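    A minimal sketch of the scalar variant; fe, quadrature, cell, and solution are placeholders:

        dealii::FEValues<dim> fe_values(fe, quadrature,
                                        dealii::update_3rd_derivatives);
        std::vector<dealii::Tensor<3, dim>> third_derivatives(quadrature.size());

        fe_values.reinit(cell);
        fe_values.get_function_third_derivatives(solution, third_derivatives);
        // third_derivatives[q][i][j][k] is the (i,j,k)th component of the
        // third-derivative tensor at quadrature point q.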

    @@ -2162,7 +2162,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
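    In practice, JxW() is what turns a sum over quadrature points into an approximation of an integral. A sketch, assuming fe_values was built with update_values | update_JxW_values; the other names are placeholders:

        std::vector<double> values(quadrature.size());

        fe_values.reinit(cell);
        fe_values.get_function_values(solution, values);

        double cell_integral = 0;
        for (unsigned int q = 0; q < quadrature.size(); ++q)
          cell_integral += values[q] * fe_values.JxW(q); // u_h(x_q) dx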
    @@ -2219,7 +2219,7 @@
    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
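    For illustration (a sketch; the return type in deal.II is a DerivativeForm<1,dim,spacedim>, and the names are placeholders as before):

        dealii::FEValues<dim> fe_values(fe, quadrature,
                                        dealii::update_jacobians);
        fe_values.reinit(cell);
        for (unsigned int q = 0; q < quadrature.size(); ++q)
          {
            const dealii::DerivativeForm<1, dim, dim> J = fe_values.jacobian(q);
            // The determinant of J is the volume scaling factor that also
            // enters JxW(q); it is positive for a non-degenerate cell.
            Assert(J.determinant() > 0, dealii::ExcMessage("degenerate cell"));
          }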
    @@ -2277,7 +2277,7 @@
    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2335,7 +2335,7 @@
    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2393,7 +2393,7 @@
    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
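    The four Jacobian-related queries above each require their own update flag; the flags can be combined when the FEValues object is constructed. A sketch with placeholder names:

        dealii::FEValues<dim> fe_values(
          fe, quadrature,
          dealii::update_jacobians | dealii::update_jacobian_grads |
            dealii::update_jacobian_pushed_forward_grads |
            dealii::update_jacobian_2nd_derivatives);

        fe_values.reinit(cell);
        const auto J   = fe_values.jacobian(0);                     // J_ij
        const auto dJ  = fe_values.jacobian_grad(0);                // dJ_jk/dxhat_i
        const auto G   = fe_values.jacobian_pushed_forward_grad(0); // pushed forward
        const auto d2J = fe_values.jacobian_2nd_derivative(0);      // d^2 J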
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 2024-01-30 03:04:31.460706522 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 2024-01-30 03:04:31.456706489 +0000 @@ -614,7 +614,7 @@

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), an exception of type ExcShapeFunctionNotPrimitive is thrown. In that case, use the shape_value_component() function.

    Parameters
    i  Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_point  Number of the quadrature point at which the function is to be evaluated.
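    The usual home of shape_value() is a local assembly loop. A mass-matrix sketch, assuming fe_values was built with update_values | update_JxW_values and the placeholder names fe, quadrature, and cell:

        const unsigned int         dofs_per_cell = fe.n_dofs_per_cell();
        dealii::FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);

        fe_values.reinit(cell);
        for (unsigned int q = 0; q < quadrature.size(); ++q)
          for (unsigned int i = 0; i < dofs_per_cell; ++i)
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
              cell_matrix(i, j) += fe_values.shape_value(i, q) * // phi_i(x_q)
                                   fe_values.shape_value(j, q) * // phi_j(x_q)
                                   fe_values.JxW(q);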
    @@ -648,7 +648,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    i  Number of the shape function $\varphi_i$ to be evaluated.
    q_point  Number of the quadrature point at which the function is to be evaluated.
    component  Vector component to be evaluated.
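    For a non-primitive element (FE_RaviartThomas, for example) the per-component variant is the only usable one. A sketch; i and q are placeholder indices:

        for (unsigned int c = 0; c < fe.n_components(); ++c)
          {
            const double v = fe_values.shape_value_component(i, q, c);
            // For a primitive shape function, exactly one component c gives
            // a non-zero v, and that value equals shape_value(i, q).
            (void)v;
          }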
    @@ -680,7 +680,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    i  Number of the shape function $\varphi_i$ to be evaluated.
    q_point  Number of the quadrature point at which the function is to be evaluated.
    @@ -840,17 +840,17 @@ std::vector< typename InputVector::value_type > & values

    Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.


    If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

    Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]  values  The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
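    Put together, the pattern described above (and first shown in the Results section of step-4) looks roughly like this; dof_handler and solution are placeholders for what the surrounding program provides:

        const dealii::QGauss<dim> quadrature(fe.degree + 1);
        dealii::FEValues<dim>     fe_values(fe, quadrature,
                                            dealii::update_values);
        std::vector<double>       values(quadrature.size()); // presized

        for (const auto &cell : dof_handler.active_cell_iterators())
          {
            fe_values.reinit(cell);
            fe_values.get_function_values(solution, values);
            // values[q] now holds u_h(x_q^K) at quadrature point q of cell K.
          }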
    @@ -879,7 +879,7 @@ std::vector< Vector< typename InputVector::value_type > > & values

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3320 of file fe_values.cc.
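    For the multi-component overload, the output is one Vector per quadrature point, sized to the number of components of the element. A sketch:

        std::vector<dealii::Vector<double>> values(
          quadrature.size(), dealii::Vector<double>(fe.n_components()));

        fe_values.reinit(cell);
        fe_values.get_function_values(solution, values);
        // values[q](c) is the value of vector component c at point q.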

    @@ -1022,16 +1022,16 @@ std::vector< Tensor< 1, spacedim, typename InputVector::value_type > > & gradients

    Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.


    This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

    Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]  gradients  The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
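    A corresponding sketch for gradients, assuming update_gradients was among the constructor flags:

        std::vector<dealii::Tensor<1, dim>> gradients(quadrature.size());

        fe_values.reinit(cell);
        fe_values.get_function_gradients(solution, gradients);
        // gradients[q][d] is the derivative in direction d at point q;
        // gradients[q].norm() gives |grad u_h(x_q)|.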
    @@ -1060,7 +1060,7 @@ std::vector< std::vector< Tensor< 1, spacedim, typename InputVector::value_type > > > & gradients

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3463 of file fe_values.cc.

    @@ -1158,11 +1158,11 @@
    Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]  hessians  The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1196,7 +1196,7 @@ const bool quadrature_points_fastest = false

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3576 of file fe_values.cc.

    @@ -1294,11 +1294,11 @@
    Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]  laplacians  The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1329,7 +1329,7 @@ std::vector< Vector< typename InputVector::value_type > > & laplacians

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1463,11 +1463,11 @@
    Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]  third_derivatives  The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1501,7 +1501,7 @@ const bool quadrature_points_fastest = false

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 3826 of file fe_values.cc.

    @@ -1770,7 +1770,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1811,7 +1811,7 @@
    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1853,7 +1853,7 @@
    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1895,7 +1895,7 @@
    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1937,7 +1937,7 @@
    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 2024-01-30 03:04:31.496706822 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 2024-01-30 03:04:31.496706822 +0000 @@ -701,7 +701,7 @@

    Return the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected scalar component.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1545 of file fe_values.cc.
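    As an illustration of the extractor mechanism (a sketch: the component index dim assumes a Stokes-like element whose last component is the pressure; the other names are placeholders):

        const dealii::FEValuesExtractors::Scalar pressure(dim);
        std::vector<double> pressure_values(quadrature.size());

        fe_values.reinit(cell);
        fe_values[pressure].get_function_values(solution, pressure_values);
        // pressure_values[q] is the selected component's value at point q.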

    @@ -774,7 +774,7 @@

    Return the gradients of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected scalar component.

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1600 of file fe_values.cc.

    @@ -833,7 +833,7 @@

    Return the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_hessians function but it only works on the selected scalar component.

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1654 of file fe_values.cc.

    @@ -892,7 +892,7 @@

    Return the Laplacians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called. The Laplacians are the trace of the Hessians.

    This function is the equivalent of the FEValuesBase::get_function_laplacians function but it only works on the selected scalar component.

    The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1708 of file fe_values.cc.

    @@ -951,7 +951,7 @@

    Return the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_third_derivatives function but it only works on the selected scalar component.

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1762 of file fe_values.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-01-30 03:04:31.524707056 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-01-30 03:04:31.524707056 +0000 @@ -156,9 +156,9 @@

    Detailed Description

    template<int dim, int spacedim>
    class FEValuesViews::SymmetricTensor< 2, dim, spacedim >

    A class representing a view to a set of (dim*dim + dim)/2 components forming a symmetric second-order tensor from a vector-valued finite element. Views are discussed in the Handling vector valued problems module.

    This class allows one to query the value and divergence of (components of) shape functions and solutions representing symmetric tensors. The divergence of a symmetric tensor $S_{ij}, 0\le i,j<\text{dim}$ is defined as $d_i = \sum_j \frac{\partial S_{ij}}{\partial x_j}, 0\le i<\text{dim}$, which due to the symmetry of the tensor is also $d_i = \sum_j \frac{\partial S_{ji}}{\partial x_j}$. In other words, due to the symmetry of $S$ it does not matter whether we apply the nabla operator by row or by column to get the divergence.

    You get an object of this type if you apply a FEValuesExtractors::SymmetricTensor to an FEValues, FEFaceValues or FESubfaceValues object.

    Definition at line 1477 of file fe_values.h.

    @@ -485,7 +485,7 @@

    Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2263 of file fe_values.cc.

    @@ -559,7 +559,7 @@

    Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

    See the general discussion of this class for a definition of the divergence.

    The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2317 of file fe_values.cc.
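    A sketch of the divergence query; the first_component index 0 is an assumption about the element layout (e.g. a stress block at the start of an FESystem):

        const dealii::FEValuesExtractors::SymmetricTensor<2> stress(0);
        std::vector<dealii::Tensor<1, dim>> divergences(quadrature.size());

        fe_values.reinit(cell);
        fe_values[stress].get_function_divergences(solution, divergences);
        // divergences[q][i] = sum_j dS_ij/dx_j at quadrature point q.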

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-01-30 03:04:31.552707289 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-01-30 03:04:31.552707289 +0000 @@ -169,8 +169,8 @@

    Detailed Description

    template<int dim, int spacedim>
    class FEValuesViews::Tensor< 2, dim, spacedim >

    A class representing a view to a set of dim*dim components forming a second-order tensor from a vector-valued finite element. Views are discussed in the Handling vector valued problems module.

    This class allows one to query the value, gradient and divergence of (components of) shape functions and solutions representing tensors. The divergence of a tensor $T_{ij},\, 0\le i,j<\text{dim}$ is defined as $d_i = \sum_j \frac{\partial T_{ij}}{\partial x_j},\, 0\le i<\text{dim}$, whereas its gradient is $G_{ijk} = \frac{\partial T_{ij}}{\partial x_k}$.

    You get an object of this type if you apply a FEValuesExtractors::Tensor to an FEValues, FEFaceValues or FESubfaceValues object.

    Definition at line 1815 of file fe_values.h.

    @@ -591,7 +591,7 @@

    Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2373 of file fe_values.cc.

    @@ -665,7 +665,7 @@

    Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

    See the general discussion of this class for a definition of the divergence.

The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2427 of file fe_values.cc.

    @@ -724,7 +724,7 @@

    Return the gradient of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    See the general discussion of this class for a definition of the gradient.

The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2482 of file fe_values.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2024-01-30 03:04:31.600707689 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2024-01-30 03:04:31.600707689 +0000 @@ -229,8 +229,8 @@
    template<int dim, int spacedim = dim>
    class FEValuesViews::Vector< dim, spacedim >

    A class representing a view to a set of spacedim components forming a vector part of a vector-valued finite element. Views are discussed in the Handling vector valued problems module.

Note that in the current context, a vector is meant in the sense in which physics uses it: it has spacedim components that behave in specific ways under coordinate system transformations. Examples include velocity or displacement fields. This is opposed to how mathematics uses the word "vector" (and how we use this word in other contexts in the library, for example in the Vector class), where it really stands for a collection of numbers. An example of this latter use of the word could be the set of concentrations of chemical species in a flame; however, these are really just a collection of scalar variables, since they do not change if the coordinate system is rotated, unlike the components of a velocity vector, and consequently, this class should not be used in that context.

This class allows one to query the value, gradient and divergence of (components of) shape functions and solutions representing vectors. The gradient of a vector $d_{k}, 0\le k<\text{dim}$ is defined as $S_{ij} = \frac{\partial d_{i}}{\partial x_j}, 0\le i,j<\text{dim}$.

    You get an object of this type if you apply a FEValuesExtractors::Vector to an FEValues, FEFaceValues or FESubfaceValues object.

    Definition at line 675 of file fe_values.h.
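
A minimal sketch of obtaining and querying such a vector view (names and the component offset are illustrative assumptions):

    #include <deal.II/fe/fe_values.h>

    using namespace dealii;

    template <int dim>
    void query_vector_view(const FEValues<dim> &fe_values, // already reinit()ed
                           const unsigned int   i,
                           const unsigned int   q)
    {
      // View of components 0..dim-1 as a physical vector field:
      const FEValuesExtractors::Vector velocities(0);

      const Tensor<1, dim> u      = fe_values[velocities].value(i, q);
      const Tensor<2, dim> grad_u = fe_values[velocities].gradient(i, q);
      const double         div_u  = fe_values[velocities].divergence(i, q);
      (void)u;
      (void)grad_u;
      (void)div_u;
    }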

    @@ -288,8 +288,8 @@

    An alias for the type of symmetrized gradients of the view this class represents. Here, for a set of dim components of the finite element, the symmetrized gradient is a SymmetricTensor<2,spacedim>.

The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf v^T)$.

    Definition at line 705 of file fe_values.h.

@@ -812,8 +812,8 @@ const unsigned int q_point

    Return the symmetric gradient (a symmetric tensor of rank 2) of the vector component selected by this view, for the shape function and quadrature point selected by the arguments.

The symmetric gradient is defined as $\frac 12 [(\nabla \phi_i(x_q)) + (\nabla \phi_i(x_q))^T]$, where $\phi_i$ represents the dim components selected from the FEValuesBase object, and $x_q$ is the location of the $q$-th quadrature point.

    Note
    The meaning of the arguments is as documented for the value() function.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
@@ -862,16 +862,16 @@ const unsigned int q_point

    Return the vector curl of the vector components selected by this view, for the shape function and quadrature point selected by the arguments. For 1d this function does not make any sense. Thus it is not implemented for spacedim=1. In 2d the curl is defined as

\begin{equation*}
  \operatorname{curl}(u) \dealcoloneq \frac{du_2}{dx} - \frac{du_1}{dy},
\end{equation*}

whereas in 3d it is given by

\begin{equation*}
  \operatorname{curl}(u) \dealcoloneq \left( \begin{array}{c}
  \frac{du_3}{dy}-\frac{du_2}{dz}\\ \frac{du_1}{dz}-\frac{du_3}{dx}\\
  \frac{du_2}{dx}-\frac{du_1}{dy} \end{array} \right).
\end{equation*}

    Note
    The meaning of the arguments is as documented for the value() function.
    @@ -951,7 +951,7 @@

    Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1818 of file fe_values.cc.
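
A minimal sketch of calling this function (assuming `solution` is a Vector<double> and fe_values was constructed with update_values and reinit()ed on a cell; names are illustrative):

    #include <deal.II/fe/fe_values.h>
    #include <deal.II/lac/vector.h>

    using namespace dealii;

    template <int dim>
    void evaluate_velocity(const FEValues<dim>  &fe_values,
                           const Vector<double> &solution)
    {
      const FEValuesExtractors::Vector velocities(0);
      // One value_type (= Tensor<1,dim>) entry per quadrature point:
      std::vector<Tensor<1, dim>> values(fe_values.n_quadrature_points);
      fe_values[velocities].get_function_values(solution, values);
    }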

    @@ -1024,7 +1024,7 @@

    Return the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected vector components.

The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1872 of file fe_values.cc.

    @@ -1082,10 +1082,10 @@

    Return the symmetrized gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf v^T)$.

    Note
    There is no equivalent function such as FEValuesBase::get_function_symmetric_gradients in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.
The data type stored by the output vector must be what you get when you multiply the symmetric gradients of shape functions (i.e., symmetric_gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1926 of file fe_values.cc.
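
A minimal sketch of this call (same assumptions as the value example above; symmetric_gradient_type is SymmetricTensor<2,dim>):

    #include <deal.II/base/symmetric_tensor.h>
    #include <deal.II/fe/fe_values.h>
    #include <deal.II/lac/vector.h>

    using namespace dealii;

    template <int dim>
    void evaluate_strains(const FEValues<dim>  &fe_values,
                          const Vector<double> &solution)
    {
      const FEValuesExtractors::Vector velocities(0);
      std::vector<SymmetricTensor<2, dim>> strains(
        fe_values.n_quadrature_points);
      // Requires update_gradients in the UpdateFlags, per the note above:
      fe_values[velocities].get_function_symmetric_gradients(solution,
                                                             strains);
    }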

    @@ -1144,7 +1144,7 @@

    Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1982 of file fe_values.cc.

    @@ -1203,7 +1203,7 @@

    Return the curl of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_curls in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

The data type stored by the output vector must be what you get when you multiply the curls of shape functions (i.e., curl_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2037 of file fe_values.cc.
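
A minimal sketch of this call; the curl_type alias lets the same code work in 2d (where it is Tensor<1,1>) and 3d (Tensor<1,3>), with names otherwise as in the earlier sketches:

    #include <deal.II/fe/fe_values.h>
    #include <deal.II/lac/vector.h>

    using namespace dealii;

    template <int dim>
    void evaluate_curls(const FEValues<dim>  &fe_values,
                        const Vector<double> &solution)
    {
      const FEValuesExtractors::Vector velocities(0);
      using curl_type = typename FEValuesViews::Vector<dim>::curl_type;
      std::vector<curl_type> curls(fe_values.n_quadrature_points);
      fe_values[velocities].get_function_curls(solution, curls);
    }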

    @@ -1262,7 +1262,7 @@

    Return the Hessians of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_hessians function but it only works on the selected vector components.

The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2091 of file fe_values.cc.

    @@ -1321,7 +1321,7 @@

    Return the Laplacians of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called. The Laplacians are the trace of the Hessians.

    This function is the equivalent of the FEValuesBase::get_function_laplacians function but it only works on the selected vector components.

The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., laplacian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2145 of file fe_values.cc.

    @@ -1380,7 +1380,7 @@

    Return the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_third_derivatives function but it only works on the selected scalar component.

The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 2207 of file fe_values.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 2024-01-30 03:04:31.740708855 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 2024-01-30 03:04:31.740708855 +0000 @@ -748,11 +748,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -3466,7 +3466,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
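
A minimal sketch of the interpolation workflow described above (in deal.II this is FiniteElement::convert_generalized_support_point_values_to_dof_values(); the helper name and the choice of Function are illustrative assumptions):

    #include <deal.II/base/function.h>
    #include <deal.II/fe/fe.h>
    #include <deal.II/lac/vector.h>

    using namespace dealii;

    template <int dim>
    std::vector<double>
    interpolate_to_dof_values(const FiniteElement<dim> &fe,
                              const Function<dim>      &f)
    {
      // Evaluate f at the generalized support points x_i ...
      const std::vector<Point<dim>> &points =
        fe.get_generalized_support_points();
      std::vector<Vector<double>> support_point_values(
        points.size(), Vector<double>(fe.n_components()));
      for (unsigned int i = 0; i < points.size(); ++i)
        f.vector_value(points[i], support_point_values[i]);

      // ... and apply the node functionals Psi_i to get dof values:
      std::vector<double> dof_values(fe.dofs_per_cell);
      fe.convert_generalized_support_point_values_to_dof_values(
        support_point_values, dof_values);
      return dof_values;
    }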

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3574,7 +3574,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3884,9 +3884,9 @@
component_mask: The mask that selects individual components of the finite element
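
A minimal sketch of this ComponentMask-to-BlockMask conversion, assuming a Taylor-Hood style FESystem in which the velocity components do cover complete blocks:

    #include <deal.II/fe/fe_q.h>
    #include <deal.II/fe/fe_system.h>
    #include <deal.II/fe/fe_values_extractors.h>

    using namespace dealii;

    template <int dim>
    BlockMask velocity_block_mask()
    {
      const FESystem<dim> fe(FE_Q<dim>(2), dim,  // velocity components
                             FE_Q<dim>(1), 1);   // pressure
      const FEValuesExtractors::Vector velocities(0);
      const ComponentMask component_mask = fe.component_mask(velocities);
      // Succeeds because the velocity components cover complete blocks;
      // the returned mask has fe.n_blocks() entries:
      return fe.block_mask(component_mask);
    }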

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
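
A minimal sketch of querying this association per degree of freedom (in deal.II this corresponds to FiniteElement::get_associated_geometry_primitive(); the loop and element choice are illustrative assumptions):

    #include <deal.II/fe/fe.h>

    using namespace dealii;

    template <int dim>
    void classify_dofs(const FiniteElement<dim> &fe)
    {
      for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
        {
          const GeometryPrimitive primitive =
            fe.get_associated_geometry_primitive(i);
          // For FE_Q<3>(2): vertex dofs yield GeometryPrimitive::vertex,
          // edge-midpoint dofs GeometryPrimitive::line, face-center dofs
          // GeometryPrimitive::quad, per the discussion above.
          (void)primitive;
        }
    }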
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 2024-01-30 03:04:31.888710088 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 2024-01-30 03:04:31.892710121 +0000 @@ -724,11 +724,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -3406,7 +3406,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3514,7 +3514,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3824,9 +3824,9 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 2024-01-30 03:04:32.036711321 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 2024-01-30 03:04:32.036711321 +0000 @@ -490,8 +490,8 @@

    Detailed Description

    template<int dim>
    class FE_BernardiRaugel< dim >

    The Bernardi-Raugel element.

This class implements the non-standard Bernardi-Raugel (BR) element that can be used as one part of a stable velocity/pressure pair for the Stokes equation. The BR element can be seen as either an enriched version of the $Q_1^d$ element with added bubble functions on each edge (in 2d) or face (in 3d), or as a reduced version of the $Q_2^d$ element. It addresses the fact that the $Q_1^d\times Q_0$ combination is not inf-sup stable (requiring a larger velocity space), and that the $Q_2^d\times Q_1$ combination is stable but sub-optimal since the velocity space is too large relative to the pressure space to provide additional accuracy commensurate with the cost of the large number of velocity unknowns.

    The element was introduced in the following paper:

    @article{BR85,
    author = {Christine Bernardi and Genevi{\`e}ve Raugel},
    title = {Analysis of some finite elements for the {S}tokes problem},
    @@ -506,7 +506,7 @@
    }

    Degrees of freedom

    The BR1 element has dim degrees of freedom on each vertex and 1 on each face. The shape functions are ordered by the $(Q_1)^d$ shape functions supported on each vertex, increasing according to vertex ordering on the element in GeometryInfo, then the bubble functions follow in the ordering given in PolynomialsBernardiRaugel.

This element only has 1 degree (degree $p=1$) because it yields an LBB-stable pair BR1-P0 for Stokes problems, which is of lower degree than the Taylor-Hood element. The pair is sometimes referred to as an enriched P1-P0 element or a reduced P2-P0 element.

    This element does not support hanging nodes or multigrid in the current implementation.

    Some numerical experiments have shown that this element may converge with first-order accuracy when using the BR1-Q0 pair for the mixed Laplace equation in step-20.
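
A minimal sketch of assembling the BR1-P0 pair described above (FE_DGQ<dim>(0) plays the role of the piecewise-constant pressure space on quadrilaterals/hexahedra; the helper name is an illustrative assumption):

    #include <deal.II/fe/fe_bernardi_raugel.h>
    #include <deal.II/fe/fe_dgq.h>
    #include <deal.II/fe/fe_system.h>
    #include <iostream>

    using namespace dealii;

    template <int dim>
    void print_br1_p0_info()
    {
      const FESystem<dim> fe(FE_BernardiRaugel<dim>(1), 1,  // velocities
                             FE_DGQ<dim>(0),            1); // pressure
      std::cout << fe.get_name() << " has " << fe.dofs_per_cell
                << " dofs per cell." << std::endl;
    }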

    @@ -638,7 +638,7 @@

    Constructor for the Bernardi-Raugel element of degree p. The only supported degree is 1.

• p: The degree of the element, $p=1$ for $BR_1$.

    Definition at line 44 of file fe_bernardi_raugel.cc.

    @@ -735,11 +735,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -3387,7 +3387,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3495,7 +3495,7 @@
scalar: An object that represents a single scalar vector component of this finite element.
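
As a small illustration of the scalar-extractor variant just described (a sketch; the element choice is an assumption, picked so that each scalar field forms its own block):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

int main()
{
  // Two scalar Q1 fields: two vector components and two blocks.
  const dealii::FESystem<2> fe(dealii::FE_Q<2>(1), 2);

  // Extractor for the second scalar component. Because that component
  // coincides with a complete block, the conversion succeeds and the
  // resulting mask selects exactly one of the fe.n_blocks() == 2 blocks.
  const dealii::FEValuesExtractors::Scalar second_field(1);
  const dealii::BlockMask mask = fe.block_mask(second_field);
  (void)mask; // silence unused-variable warnings in this sketch
}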

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3805,9 +3805,9 @@
component_mask: The mask that selects individual components of the finite element.
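
The same conversion in code, using the Raviart-Thomas situation from the note (a sketch; the concrete element combination is an assumption for illustration):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

int main()
{
  // dim velocity components in one Raviart-Thomas block plus a scalar
  // pressure block: three components but only two blocks in 2d.
  const dealii::FESystem<2> fe(dealii::FE_RaviartThomas<2>(0), 1,
                               dealii::FE_Q<2>(1), 1);

  // A mask covering all velocity components corresponds to a complete
  // block, so the conversion is well defined ...
  const dealii::FEValuesExtractors::Vector velocities(0);
  const dealii::BlockMask ok = fe.block_mask(fe.component_mask(velocities));
  (void)ok;

  // ... whereas a mask for the single x velocity alone would cover only
  // part of the Raviart-Thomas block and, as the note above explains,
  // block_mask() would then throw an exception.
}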

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
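
A brief sketch of how this query can be used (the $Q_2$-in-3d element mirrors the example above; the loop and output are illustrative):

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

#include <iostream>

int main()
{
  const dealii::FE_Q<3> fe(2); // Q2 in 3d: vertex, edge, face, and cell DoFs

  for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
    {
      // Returns GeometryPrimitive::vertex, ::line, ::quad, or ::hex;
      // get_dimension() maps these to 0, 1, 2, 3 respectively.
      const dealii::GeometryPrimitive primitive =
        fe.get_associated_geometry_primitive(i);
      std::cout << "DoF " << i << " is associated with a "
                << primitive.get_dimension() << "-dimensional object\n";
    }
}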

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 2024-01-30 03:04:32.180712521 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 2024-01-30 03:04:32.180712521 +0000
@@ -2402,17 +2402,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
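
For orientation, a short derivation sketch (added here for the reader; it is not part of the original class documentation): writing $\phi_i(\mathbf x) = \hat\phi_i(\hat{\mathbf x}(\mathbf x))$ and applying the chain rule twice gives

\[
\frac{d^2 \phi_i}{d x_j d x_k}
= \frac{d^2 \hat\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1}
+ \frac{d \hat\phi_i}{d \hat x_J} \frac{d (J_{jJ})^{-1}}{d x_k}.
\]

The first term is the uncorrected $D_{ijk}$; expanding the derivative of the inverse Jacobian shows that the second term equals $-H_{mjk} \frac{d \phi_i}{d x_m}$, which is exactly the term subtracted in the corrected formula above.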

    @@ -2447,21 +2447,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3609,7 +3609,7 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3717,7 +3717,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3996,9 +3996,9 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4033,11 +4033,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$; that is, $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 2024-01-30 03:04:32.328713754 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 2024-01-30 03:04:32.328713754 +0000 @@ -3284,7 +3284,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return), where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3392,7 +3392,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3702,9 +3702,9 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3739,11 +3739,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$; that is, $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 2024-01-30 03:04:32.472714954 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 2024-01-30 03:04:32.472714954 +0000 @@ -3284,7 +3284,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return), where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3392,7 +3392,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3702,9 +3702,9 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3739,11 +3739,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$; that is, $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 2024-01-30 03:04:32.616716153 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 2024-01-30 03:04:32.616716153 +0000
@@ -486,10 +486,10 @@

    This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

For this cell, a bilinear transformation $F$ produces the relations $x=\hat x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$, are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)}$ and $\phi_2(x,y)=t$, where $t=\frac{y}{s-x+sx+y-sy}$.

For the simple case that $s=1$, i.e., if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are no longer linear, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.
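
A quick standalone numerical check of the $s=1$ simplification just claimed (a sketch, independent of the library):

#include <cassert>
#include <cmath>

int main()
{
  // For s = 1 the denominator s - x + s*x + y - s*y collapses to 1,
  // so t = y and the mapped shape functions become linear again.
  const double s = 1.0;
  for (double x = 0.1; x < 1.0; x += 0.2)
    for (double y = 0.1; y < 1.0; y += 0.2)
      {
        const double t = y / (s - x + s * x + y - s * y);
        assert(std::abs(t - y) < 1e-14);
      }
  return 0;
}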

    Visualization of shape functions

    In 2d, the shape functions of this element look as follows.

$P_0$ element

@@ -505,7 +505,7 @@

[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return), where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

$P_1$ element

@@ -529,7 +529,7 @@
@@ -517,9 +517,9 @@

$P_1$ element, shape function 0

$P_1$ element, shape function 1

$P_1$ element, shape function 2

    @@ -2366,17 +2366,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2411,21 +2411,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3734,7 +3734,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3842,7 +3842,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4121,9 +4121,9 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4158,11 +4158,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
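A minimal sketch of querying this association (the element choice and output are illustrative):

// Minimal sketch: for each DoF of a Q2 element in 3d, report the dimension
// of the lowest-dimensional object it is logically associated with
// (0 = vertex, 1 = line, 2 = quad, 3 = hex).
#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>
#include <iostream>

using namespace dealii;

void geometry_primitive_example()
{
  const FE_Q<3> fe(2);
  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    {
      const GeometryPrimitive primitive =
        fe.get_associated_geometry_primitive(i);
      // For Q2 in 3d one sees vertex, edge (line), face (quad), and cell
      // interior (hex) DoFs, in the element's DoF numbering.
      std::cout << "DoF " << i << ": dimension "
                << primitive.get_dimension() << '\n';
    }
}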
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the value returned by FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
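A minimal sketch of the interpolation workflow described above, assuming the FiniteElement::convert_generalized_support_point_values_to_dof_values() interface; the element and the sample function are illustrative:

// Minimal sketch: evaluate f(x,y) = x*y at the generalized support points
// of a scalar biquadratic element and convert those values to the nodal
// values (expansion coefficients) of the interpolant f_h.
#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/vector.h>
#include <vector>

using namespace dealii;

void node_values_example()
{
  const FE_Q<2> fe(2);

  // One value vector per generalized support point; each has as many
  // entries as the element has vector components (here: 1).
  const auto &points = fe.get_generalized_support_points();
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int q = 0; q < points.size(); ++q)
    support_point_values[q](0) = points[q][0] * points[q][1];

  // Applying the node functionals Psi_i yields the dof values:
  std::vector<double> dof_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
}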
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 2024-01-30 03:04:32.768717420 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 2024-01-30 03:04:32.768717420 +0000 @@ -496,10 +496,10 @@

    This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

For this cell, a bilinear transformation $F$ produces the relations $x=\hat x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$, are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.

For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

    Visualization of shape functions

    In 2d, the shape functions of this element look as follows.

    $P_0$ element

    @@ -515,7 +515,7 @@

[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
$P_1$ element

$P_1$ element, shape function 0

$P_1$ element, shape function 1

$P_1$ element, shape function 2

    @@ -2484,17 +2484,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.


    @@ -2529,21 +2529,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

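The following is a minimal sketch, not deal.II's internal implementation, of the lower-order (Hessian) correction above, written with deal.II tensor types; all names are illustrative:

// Minimal sketch: subtract H_{mjk} * d(phi_i)/dx_m from the naively mapped
// Hessian D_{jk} of a single shape function phi_i.
#include <deal.II/base/tensor.h>

using namespace dealii;

template <int dim>
Tensor<2, dim>
correct_hessian(const Tensor<2, dim> &D,        // D_{jk}: mapped Hessian
                const Tensor<3, dim> &H,        // H_{mjk}: Jacobian
                                                // pushed-forward derivative
                const Tensor<1, dim> &gradient) // d(phi_i)/dx_m
{
  Tensor<2, dim> corrected = D;
  for (unsigned int j = 0; j < dim; ++j)
    for (unsigned int k = 0; k < dim; ++k)
      for (unsigned int m = 0; m < dim; ++m)
        corrected[j][k] -= H[m][j][k] * gradient[m];
  return corrected;
}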

    @@ -4050,7 +4050,7 @@

Return a block mask with as many elements as this object has blocks, in which only the single component that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4158,7 +4158,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4468,9 +4468,9 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -4505,11 +4505,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the value returned by FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 2024-01-30 03:04:32.912718619 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 2024-01-30 03:04:32.912718619 +0000 @@ -476,7 +476,7 @@
    template<int dim, int spacedim = dim>
    class FE_DGPNonparametric< dim, spacedim >

    Discontinuous finite elements evaluated at the mapped quadrature points.

Warning: this class does not work properly yet. Don't use it!

This finite element implements complete polynomial spaces, that is, $d$-dimensional polynomials of order $k$.

    The polynomials are not mapped. Therefore, they are constant, linear, quadratic, etc. on any grid cell.

    Since the polynomials are evaluated at the quadrature points of the actual grid cell, no grid transfer and interpolation matrices are available.

This class is experimental; therefore, the implementation will remain incomplete.

    @@ -496,7 +496,7 @@

[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
$P_1$ element

$P_1$ element, shape function 0

$P_1$ element, shape function 1

$P_1$ element, shape function 2

    @@ -3241,7 +3241,7 @@

Return a block mask with as many elements as this object has blocks, in which only the single component that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3349,7 +3349,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3659,9 +3659,9 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3696,11 +3696,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the value returned by FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 2024-01-30 03:04:33.052719786 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 2024-01-30 03:04:33.056719819 +0000 @@ -506,7 +506,7 @@ *

    with node 13 being placed in the interior of the hex.

    Note, however, that these are just the Lagrange interpolation points of the shape functions. Even though they may physically be on the boundary of the cell, they are logically in the interior since there are no continuity requirements for these shape functions across cell boundaries. While discontinuous, when restricted to a single cell the shape functions of this element are exactly the same as those of the FE_Q element where they are shown visually.

    Unit support point distribution and conditioning of interpolation

When constructing an FE_DGQ element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points $x_i$ are those points where the $j$th Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points become increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

    The Gauss-Lobatto points in 1d include the end points 0 and +1 of the unit interval. The interior points are shifted towards the end points, which gives a denser point distribution close to the element boundary.

    Definition at line 112 of file fe_dgq.h.
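A minimal sketch illustrating the support point distribution discussed above (the degree and the expected values are illustrative):

// Minimal sketch: the unit support points of an FE_DGQ element of degree
// >= 3 are Gauss-Lobatto points, not equidistant points.
#include <deal.II/fe/fe_dgq.h>
#include <iostream>

int main()
{
  using namespace dealii;
  const FE_DGQ<1> fe(4); // degree 4: five support points in 1d
  // Expected abscissas (up to the element's DoF ordering):
  // 0, ~0.173, 0.5, ~0.827, 1, i.e., the nodes of the five-point
  // Gauss-Lobatto rule on [0,1].
  for (const Point<1> &p : fe.get_unit_support_points())
    std::cout << p << '\n';
  return 0;
}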

    @@ -2308,17 +2308,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.


    @@ -2353,21 +2353,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.


    @@ -3578,7 +3578,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which only the single component that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3686,7 +3686,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3965,9 +3965,9 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html 2024-01-30 03:04:33.196720986 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html 2024-01-30 03:04:33.196720986 +0000 @@ -2236,17 +2236,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.


    @@ -2281,21 +2281,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.


    @@ -3506,7 +3506,7 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

Return a block mask with as many elements as this object has blocks, in which only the single component that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3614,7 +3614,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3893,9 +3893,9 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest- dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
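
    The function being described here appears to be FiniteElement::get_associated_geometry_primitive(); a minimal sketch of querying it for every degree of freedom of a $Q_2$ element in 3d (expected results noted in comments) could look like this:

    FE_Q<3> fe(2);
    for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
      {
        const GeometryPrimitive primitive =
          fe.get_associated_geometry_primitive(i);
        // For FE_Q<3>(2): vertex DoFs yield GeometryPrimitive::vertex,
        // edge-midpoint DoFs GeometryPrimitive::line, face-center DoFs
        // GeometryPrimitive::quad, and the single cell-interior DoF
        // GeometryPrimitive::hex.
      }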

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 2024-01-30 03:04:33.332722119 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 2024-01-30 03:04:33.336722152 +0000 @@ -2240,17 +2240,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians are given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.
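
    In index notation the correction is a single contraction over $m$; a schematic loop (illustrative variable names only, not the library's actual implementation) would read:

    // For one quadrature point: hessian[i][j][k] starts out as D_{ijk},
    // jacobian_pushed_forward_grad[m][j][k] holds H_{mjk}, and
    // shape_gradient[i][m] holds d phi_i / d x_m.
    for (unsigned int i = 0; i < n_shape_functions; ++i)
      for (unsigned int j = 0; j < dim; ++j)
        for (unsigned int k = 0; k < dim; ++k)
          for (unsigned int m = 0; m < dim; ++m)
            hessian[i][j][k] -=
              jacobian_pushed_forward_grad[m][j][k] * shape_gradient[i][m];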

    @@ -2285,21 +2285,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives are given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3510,7 +3510,7 @@
    [in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0, dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then throw an exception.
    Parameters
    @@ -3618,7 +3618,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.
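
    For a case where this overload does succeed, consider a hedged sketch with a system of two scalar fields, so that each component forms a complete block of its own (element choice illustrative):

    // Two scalar base elements: two components, two blocks.
    FESystem<2> fe(FE_Q<2>(2), 1, FE_DGP<2>(1), 1);

    // The second scalar component is a complete block, so the conversion
    // succeeds and yields the block mask [false, true]:
    const BlockMask second_block = fe.block_mask(FEValuesExtractors::Scalar(1));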

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then throw an exception.
    Parameters
    @@ -3897,9 +3897,9 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that may have its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 2024-01-30 03:04:33.472723285 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 2024-01-30 03:04:33.472723285 +0000 @@ -2238,17 +2238,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians are given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2283,21 +2283,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives are given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3508,7 +3508,7 @@
    [in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0, dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then throw an exception.
    Parameters
    @@ -3616,7 +3616,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then throw an exception.
    Parameters
    @@ -3895,9 +3895,9 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that may have its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 2024-01-30 03:04:33.616724485 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 2024-01-30 03:04:33.620724518 +0000 @@ -3284,7 +3284,7 @@
    [in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0, dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then throw an exception.
    Parameters
    @@ -3392,7 +3392,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then throw an exception.
    Parameters
    @@ -3702,9 +3702,9 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that may have its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3739,11 +3739,11 @@
    [in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0, dofs_per_cell).
    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.
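
    A minimal sketch of how such an interpolation might be driven from user code, assuming deal.II's generalized-support-point interface (my_function stands for some user-provided Function<2> and is purely illustrative):

    FE_RaviartThomas<2> fe(1);
    const std::vector<Point<2>> &points = fe.get_generalized_support_points();

    // Evaluate my_function at the generalized support points; each entry
    // has as many components as the element:
    std::vector<Vector<double>> support_point_values(
      points.size(), Vector<double>(fe.n_components()));
    for (unsigned int p = 0; p < points.size(); ++p)
      my_function.vector_value(points[p], support_point_values[p]);

    // Apply the node functionals to obtain the nodal values Psi_i[f]:
    std::vector<double> dof_values(fe.n_dofs_per_cell());
    fe.convert_generalized_support_point_values_to_dof_values(
      support_point_values, dof_values);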

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 2024-01-30 03:04:33.776725819 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 2024-01-30 03:04:33.772725785 +0000 @@ -3293,7 +3293,7 @@
    [in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then throw an exception.
    Parameters
    @@ -3401,7 +3401,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then throw an exception.
    Parameters
    @@ -3711,9 +3711,9 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that may have its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3748,11 +3748,11 @@
    [in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0, dofs_per_cell).
    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 2024-01-30 03:04:33.924727052 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 2024-01-30 03:04:33.924727052 +0000 @@ -484,12 +484,12 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    class FE_Enriched< dim, spacedim >

    Implementation of a partition of unity finite element method (PUM) by Babuska and Melenk, which enriches a standard finite element with an enrichment function multiplied by another (usually linear) finite element:

\[
 U(\mathbf x) = \sum_i N_i(\mathbf x) U_i + \sum_j N_j(\mathbf x) \sum_k
 F_k(\mathbf x) U_{jk}
\]

    where $N_i(\mathbf x)$ and $N_j(\mathbf x)$ are the underlying finite elements (including the mapping from the isoparametric element to the real element); $F_k(\mathbf x)$ are the scalar enrichment functions in real space (e.g. $1/r$, $\exp(-r)$, etc.); and $U_i$ and $U_{jk}$ are the standard and enriched DoFs. This allows including in the finite element space a priori knowledge about the partial differential equation being solved, which in turn improves the local approximation properties of the spaces. This can be useful for highly oscillatory solutions, problems with domain corners or on unbounded domains, or sudden changes of boundary conditions. The PUM method uses finite element spaces which satisfy the partition of unity property (e.g. FE_Q). Among other properties, this makes the resulting space reproduce the enrichment functions exactly.

    The simplest constructor of this class takes two finite element objects and an enrichment function to be used. For example

    @@ -497,7 +497,7 @@
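    A minimal sketch of such a constructor call (assuming an enrichment function object named function, derived from Function<dim>; the pointer form matches the constructor signature documented for this class):

    FE_Enriched<dim> fe(FE_Q<dim>(2),
                        FE_Q<dim>(1),
                        &function);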

    In this case, standard DoFs are distributed by FE_Q<dim>(2), whereas enriched DoFs come from a single finite element FE_Q<dim>(1) used with a single enrichment function. The total number of DoFs on the enriched element is then the sum of the DoFs from FE_Q<dim>(2) and FE_Q<dim>(1).

    As an example of an enrichment function, consider $\exp(-x)$, which leads to the following shape functions on the unit element:

    @@ -510,7 +510,7 @@
    [Figures: 1d element, base and enriched shape functions; enriched shape function corresponding to the central vertex.]

    Note that evaluation of gradients (hessians) of the enriched shape functions or the finite element field requires evaluation of gradients (gradients and hessians) of the enrichment functions:

\begin{align*}
   U(\mathbf x)
     &= \sum_i N_i(\mathbf x) U_i
     + \sum_{j,k} N_j(\mathbf x) F_k(\mathbf x) U_{jk} \\
   \mathbf \nabla U(\mathbf x)
     &= \sum_i \mathbf \nabla N_i(\mathbf x) U_i
     + \sum_{j,k} \left[ \mathbf \nabla N_j(\mathbf x) F_k(\mathbf x)
     + N_j(\mathbf x) \mathbf \nabla F_k(\mathbf x) \right] U_{jk} \\
   \mathbf \nabla \mathbf \nabla U(\mathbf x)
     &= \sum_i \mathbf \nabla \mathbf \nabla N_i(\mathbf x) U_i
     + \sum_{j,k} \left[ \mathbf \nabla \mathbf \nabla N_j(\mathbf x)
 F_k(\mathbf x) + \mathbf \nabla F_k(\mathbf x) \mathbf \nabla N_j(\mathbf x)
 + \mathbf \nabla N_j(\mathbf x) \mathbf \nabla F_k(\mathbf x) + N_j(\mathbf
 x) \mathbf \nabla \mathbf \nabla F_k(\mathbf x) \right] U_{jk}
\end{align*}

    Using enriched and non-enriched FEs together

    In most applications it is beneficial to introduce enrichments only in some part of the domain (e.g. around a crack tip) and to use a standard FE (e.g. FE_Q) elsewhere. This can be achieved by using the hp-finite element framework in deal.II, which allows for the use of different elements on different cells. To make the resulting space $C^0$ continuous, it is then necessary for the DoFHandler class and the DoFTools::make_hanging_node_constraints() function to be able to figure out what to do at the interface between enriched and non-enriched cells. Specifically, we want the degrees of freedom corresponding to enriched shape functions to be zero at these interfaces. These classes and functions cannot do this automatically, but the effect can be achieved by using not just a regular FE_Q on cells without enrichment, but by wrapping the FE_Q in an FE_Enriched object without actually enriching it. This can be done as follows:

    FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1));

    This constructor is equivalent to calling

    FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1),
    FE_Nothing<dim>(1,true),
    nullptr);
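
    Building on this, a hedged sketch of how the two wrapped elements might then be combined in the hp-framework (fe_enriched, fe_non_enriched, triangulation, and is_enriched are illustrative names; is_enriched stands for whatever per-cell criterion the application uses):

    hp::FECollection<dim> fe_collection;
    fe_collection.push_back(fe_enriched);      // active_fe_index 0
    fe_collection.push_back(fe_non_enriched);  // active_fe_index 1

    DoFHandler<dim> dof_handler(triangulation);
    for (const auto &cell : dof_handler.active_cell_iterators())
      cell->set_active_fe_index(is_enriched(cell) ? 0 : 1);
    dof_handler.distribute_dofs(fe_collection);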
    @@ -3288,7 +3288,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then throw an exception.
    Parameters
    @@ -3396,7 +3396,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then throw an exception.
    Parameters
    @@ -3706,9 +3706,9 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that may have its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3743,11 +3743,11 @@
    [in] cell_dof_index  The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
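
    The function documented here is FiniteElement::convert_generalized_support_point_values_to_dof_values(). A minimal sketch of how the conversion might be driven, assuming deal.II's FE_RaviartThomas (an element with generalized support points) and an analytic field $f(\mathbf x) = (y, -x)$; the driver is illustrative, only the two member functions shown are part of the documented interface:

    #include <deal.II/base/point.h>
    #include <deal.II/fe/fe_raviart_thomas.h>
    #include <deal.II/lac/vector.h>
    #include <vector>

    int main()
    {
      const dealii::FE_RaviartThomas<2> fe(1);

      const std::vector<dealii::Point<2>> &points =
        fe.get_generalized_support_points();

      // One Vector per support point, with as many entries as the element
      // has vector components (two, for a 2d Raviart-Thomas element).
      std::vector<dealii::Vector<double>> support_point_values(
        points.size(), dealii::Vector<double>(fe.n_components()));
      for (unsigned int q = 0; q < points.size(); ++q)
        {
          support_point_values[q][0] = points[q][1];  // f_x = y
          support_point_values[q][1] = -points[q][0]; // f_y = -x
        }

      // nodal_values[i] is then Psi_i[f], the i-th expansion coefficient.
      std::vector<double> nodal_values(fe.dofs_per_cell);
      fe.convert_generalized_support_point_values_to_dof_values(
        support_point_values, nodal_values);
    }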

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 2024-01-30 03:04:34.064728218 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 2024-01-30 03:04:34.068728251 +0000 @@ -3354,7 +3354,7 @@
    [in] support_point_values  An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks, of which exactly the one entry corresponding to the given argument is true. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
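
    A minimal sketch of the successful case, assuming a Stokes-like FESystem in which the pressure is a scalar component that is also its own block (the element choice is illustrative):

    #include <deal.II/fe/block_mask.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/fe/fe_system.h>
    #include <deal.II/fe/fe_values_extractors.h>

    int main()
    {
      // Two Q2 copies for the velocity plus one Q1 pressure: three blocks.
      const dealii::FESystem<2> fe(dealii::FE_Q<2>(2), 2,
                                   dealii::FE_Q<2>(1), 1);

      // Component 2 (the pressure) coincides with a complete block, so
      // the conversion is well defined.
      const dealii::FEValuesExtractors::Scalar pressure(2);
      const dealii::BlockMask pressure_block = fe.block_mask(pressure);
      // pressure_block now has fe.n_blocks() == 3 entries, with only the
      // last one set. Passing a scalar buried inside a larger block
      // (e.g. one component of FE_RaviartThomas) would instead trigger
      // the exception mentioned in the note above.
    }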
    Parameters
    @@ -3462,7 +3462,7 @@
    scalar  An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
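
    The component-mask-to-block-mask conversion can be sketched the same way, again with an illustrative Stokes-like element in which the selected components happen to cover complete blocks:

    #include <deal.II/fe/block_mask.h>
    #include <deal.II/fe/component_mask.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/fe/fe_system.h>
    #include <deal.II/fe/fe_values_extractors.h>

    int main()
    {
      // Each base-element copy is its own block: two velocity blocks
      // plus one pressure block.
      const dealii::FESystem<2> fe(dealii::FE_Q<2>(2), 2,
                                   dealii::FE_Q<2>(1), 1);

      // Components 0 and 1 (the velocities) cover the first two blocks
      // completely, so the component mask converts cleanly.
      const dealii::FEValuesExtractors::Vector velocities(0);
      const dealii::ComponentMask velocity_components =
        fe.component_mask(velocities);
      const dealii::BlockMask velocity_blocks =
        fe.block_mask(velocity_components);
      // For an element whose blocks group several components (such as
      // FE_RaviartThomas), a mask selecting only one of those components
      // would produce the exception described above.
    }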
    Parameters
    @@ -3741,9 +3741,9 @@
    component_mask  The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only on the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only on the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3778,11 +3778,11 @@
    [in] cell_dof_index  The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 2024-01-30 03:04:34.200729351 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 2024-01-30 03:04:34.204729384 +0000 @@ -3441,7 +3441,7 @@
    [in] support_point_values  An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks, of which exactly the one entry corresponding to the given argument is true. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3543,7 +3543,7 @@
    scalar  An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3800,9 +3800,9 @@
    component_mask  The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only on the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only on the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3835,11 +3835,11 @@
    [in] cell_dof_index  The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 2024-01-30 03:04:34.336730484 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 2024-01-30 03:04:34.336730484 +0000 @@ -3395,7 +3395,7 @@
    [in] support_point_values  An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks, of which exactly the one entry corresponding to the given argument is true. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3503,7 +3503,7 @@
    scalar  An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3782,9 +3782,9 @@
    component_mask  The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only on the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only on the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 2024-01-30 03:04:34.472731617 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 2024-01-30 03:04:34.476731650 +0000 @@ -2992,7 +2992,7 @@
    [in] cell_dof_index  The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks, of which exactly the one entry corresponding to the given argument is true. See the glossary for more information.

    -
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3094,7 +3094,7 @@
    scalar  An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    -
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    +
    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3351,9 +3351,9 @@
    component_mask  The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    -

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    -

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only on the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    -

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    +

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    +

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only on the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    +

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3386,11 +3386,11 @@
    [in] cell_dof_index  The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 2024-01-30 03:04:34.632732950 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 2024-01-30 03:04:34.632732950 +0000 @@ -508,12 +508,12 @@

    Detailed Description

    template<int dim>
    class FE_Nedelec< dim >
    Warning
    Several aspects of the implementation are experimental. For the moment, it is safe to use the element on globally refined meshes with consistent orientation of faces. See the todo entries below for more detailed caveats.
    -

    Implementation of Nédélec elements. The Nédélec space is designed to solve problems in which the solution only lives in the space $H^\text{curl}=\{ {\mathbf u} \in L_2: \text{curl}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose curl is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the Maxwell equations and corresponding simplifications, such as the reduced version of the Maxwell equation that only involves the electric field $\mathbf E$ which has to satisfy the equation $\text{curl}\, \text{curl}\, {\mathbf E} = 0$ in the time independent case when no currents are present, or the equation $\text{curl}\,\text{curl}\,{\mathbf A} = 4\pi{\mathbf j}$ that the magnetic vector potential $\mathbf A$ has to satisfy in the time independent case.

    -

    The defining characteristic of functions in $H^\text{curl}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the tangential component(s) of the vector field must be continuous across the line (or surface) even though the normal component may not be. As a consequence, the Nédélec element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the tangential component(s) of the vector field represented by each shape function are continuous across the faces of cells.

    +

    Implementation of Nédélec elements. The Nédélec space is designed to solve problems in which the solution only lives in the space $H^\text{curl}=\{ {\mathbf u} \in L_2: \text{curl}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose curl is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the Maxwell equations and corresponding simplifications, such as the reduced version of the Maxwell equation that only involves the electric field $\mathbf E$ which has to satisfy the equation $\text{curl}\, \text{curl}\, {\mathbf E} = 0$ in the time independent case when no currents are present, or the equation $\text{curl}\,\text{curl}\,{\mathbf A} = 4\pi{\mathbf j}$ that the magnetic vector potential $\mathbf A$ has to satisfy in the time independent case.

    +

    The defining characteristic of functions in $H^\text{curl}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the tangential component(s) of the vector field must be continuous across the line (or surface) even though the normal component may not be. As a consequence, the Nédélec element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the tangential component(s) of the vector field represented by each shape function are continuous across the faces of cells.

    Other properties of the Nédélec element are that (i) it is not a primitive element; (ii) the shape functions are defined so that certain integrals over the faces are either zero or one, rather than the common case of certain point values being either zero or one.

    We follow the commonly used – though confusing – definition of the "degree" of Nédélec elements. Specifically, the "degree" of the element denotes the polynomial degree of the largest complete polynomial subspace contained in the finite element space, even if the space may contain shape functions of higher polynomial degree. The lowest-order element is consequently FE_Nedelec(0), i.e., the Nédélec element "of degree zero", even though the functions of this space are in general polynomials of degree one in each variable. This choice of "degree" implies that the approximation order of the function itself is degree+1, as with usual polynomial spaces. The numbering so chosen implies the sequence

    \[
   Q_{k+1}
   \stackrel{\text{grad}}{\rightarrow}
   \text{Nedelec}_k
   \stackrel{\text{curl}}{\rightarrow}
   \text{RaviartThomas}_k
   \stackrel{\text{div}}{\rightarrow}
   DGQ_{k}
    \]

    Note that this follows the convention of Brezzi and Raviart, though not the one used in the original paper by Nédélec.

    This class is not implemented for the codimension one case (spacedim != dim).
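
    A minimal usage sketch under the constraints stated in the warning above (a globally refined hex mesh with consistently oriented faces); the mesh setup is illustrative:

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_nedelec.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/tria.h>
    #include <iostream>

    int main()
    {
      dealii::Triangulation<3> triangulation;
      dealii::GridGenerator::hyper_cube(triangulation);
      triangulation.refine_global(2);

      // "Degree" 0 in the convention above: the shape functions are
      // polynomials of degree one in each variable, and only their
      // tangential components are continuous across faces.
      const dealii::FE_Nedelec<3> fe(0);

      dealii::DoFHandler<3> dof_handler(triangulation);
      dof_handler.distribute_dofs(fe);
      std::cout << "H(curl) DoFs: " << dof_handler.n_dofs() << '\n';
    }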

    @@ -1373,11 +1373,11 @@
    [in] support_point_values  An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    -

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    -

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    -

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    -

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    +

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    +

    In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    +

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    +

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
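
    A hedged sketch of how one might drive this function, evaluating an arbitrary Function<3> at the generalized support points and converting the results to nodal values; the helper name reference_cell_dof_values is a hypothetical choice for illustration:

#include <deal.II/base/function.h>
#include <deal.II/base/point.h>
#include <deal.II/fe/fe.h>
#include <deal.II/lac/vector.h>

#include <vector>

using namespace dealii;

std::vector<double> reference_cell_dof_values(const FiniteElement<3> &fe,
                                              const Function<3>      &f)
{
  const std::vector<Point<3>> &points = fe.get_generalized_support_points();

  // One value vector per support point, with fe.n_components() entries each.
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int q = 0; q < points.size(); ++q)
    f.vector_value(points[q], support_point_values[q]);

  // Apply the node functionals: values at support points -> dof values.
  std::vector<double> dof_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
  return dof_values;
}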

    Parameters
    [in] support_point_values    An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    scalar    An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
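
    A short sketch of the conversion described above, using a hypothetical Stokes-like FESystem (two FE_Q velocity components plus an FE_Q pressure; the element choice is an assumption for illustration):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

void mask_example()
{
  FESystem<2> fe(FE_Q<2>(2), 2, FE_Q<2>(1), 1);

  // The two velocity components form complete blocks of this element, so
  // converting the component mask into a block mask succeeds.
  const FEValuesExtractors::Vector velocities(0);
  const ComponentMask component_mask = fe.component_mask(velocities);
  const BlockMask     block_mask     = fe.block_mask(component_mask);
  (void)block_mask; // silence unused-variable warnings in this sketch

  // For an element where the x velocity is only part of a block (e.g., a
  // system built on FE_RaviartThomas), the same conversion would throw.
}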
    Parameters
    component_mask    The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial has its support point physically located on a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
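
    A small sketch of querying this association for every shape function of a $Q_2$ element in 3d, via FiniteElement::get_associated_geometry_primitive(); printing the dimension of the associated object (0: vertex, 1: line, 2: quad, 3: hex) keeps the loop independent of how one compares GeometryPrimitive values:

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

#include <iostream>

using namespace dealii;

void print_dof_associations()
{
  FE_Q<3> fe(2); // Q2 in 3d: vertex, edge, face, and interior DoFs

  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    std::cout << "dof " << i << " is associated with a "
              << fe.get_associated_geometry_primitive(i).get_dimension()
              << "-dimensional object\n";
}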

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 2024-01-30 03:04:34.768734083 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 2024-01-30 03:04:34.772734116 +0000 @@ -2897,7 +2897,7 @@
    [in]cell_dof_indexThe index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    scalar    An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    component_mask    The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial has its support point physically located on a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    [in] cell_dof_index    The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html differs (HTML document, ASCII text, with very long lines)

    class FE_NedelecSZ< dim, spacedim >::InternalData

    Derived Internal data which is used to store cell-independent data. Note that due to the nature of this element, a number of useful pre-computed quantities are stored for the computation of cell-dependent shape functions.

    The main quantities which are stored are associated with edge and face parameterizations. These are:

    • $\lambda_{i}$ - trilinear function, equal to one at the $i$-th vertex and zero at all other vertices.
    • $\sigma_{i}$ - linear functional associated with the $i$-th vertex.

    The definitions of these functionals, as well as the edge and face parameterizations and edge and face extension parameters, can be found on page 82 of Zaglmayr's thesis. The details of the definition of the globally-defined edge and face orientations can be found on page 67.
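
    To make these definitions concrete, the following is one possible realization on the 2d reference square, with the deal.II vertex ordering $v_0=(0,0)$, $v_1=(1,0)$, $v_2=(0,1)$, $v_3=(1,1)$. These formulas are an illustration following Zaglmayr's construction, not quoted from fe_nedelec_sz.h:

\[
\lambda_0 = (1-x)(1-y), \quad \lambda_1 = x(1-y), \quad
\lambda_2 = (1-x)y, \quad \lambda_3 = xy,
\]
\[
\sigma_0 = (1-x)+(1-y), \quad \sigma_1 = x+(1-y), \quad
\sigma_2 = (1-x)+y, \quad \sigma_3 = x+y.
\]

    For the edge $E$ running from $v_0$ to $v_1$, one then obtains the edge parameterization $\sigma_{E} = \sigma_0 - \sigma_1 = 1-2x$ and the edge extension parameter $\lambda_{E} = \lambda_0 + \lambda_1 = 1-y$.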

    [in] support_point_values    An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Storage for all possible edge parameterizations between vertices. These are required in the computation of edge- and face-based DoFs, which are cell-dependent.

    The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma_{j}$.

    sigma_imj_values[q][i][j] stores the value of the edge parameterization connected by vertices $i$ and $j$ at the $q$-th quadrature point.

    Note that not all of the $i$ and $j$ combinations result in valid edges on the hexahedral cell, but they are computed in this fashion for use with non-standard edge and face orientations.

    Definition at line 287 of file fe_nedelec_sz.h.


    Storage for gradients of all possible edge parameterizations between vertices. These are required in the computation of edge- and face-based DoFs, which are cell-dependent. Note that the components of the gradient are constant.

    The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma_{j}$.

    sigma_imj_grads[i][j][d] stores the gradient of the edge parameterization connected by vertices $i$ and $j$ in component $d$.

    Note that the gradient of the edge parameterization is constant on an edge, so we do not need to store it at every quadrature point.

    Definition at line 304 of file fe_nedelec_sz.h.


    Storage for edge extension parameters at quadrature points. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    The edge extension parameter of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\lambda_{E} = \lambda_{i} + \lambda_{j}$.

    Note that under this definition, the values of $\lambda_{E}$ do not change with the orientation of the edge.

    edge_lambda_values[m][q] stores the edge extension parameter value at the $q$-th quadrature point on edge $m$.

    Definition at line 347 of file fe_nedelec_sz.h.


    Storage for gradients of edge extension parameters in 2d. In this case they are constant. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    edge_lambda_grads_2d[m][d] stores the gradient of the edge extension parameter for component $d$ on edge $m$.

    Definition at line 358 of file fe_nedelec_sz.h.


    Storage for gradients of edge extension parameters in 3d. In this case they are non-constant. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    edge_lambda_grads_3d[m][q][d] stores the gradient of the edge extension parameter for component $d$ at the $q$-th quadrature point on edge $m$.

    Definition at line 369 of file fe_nedelec_sz.h.


    Storage for 2nd derivatives of edge extension parameters in 3d, which are constant across the cell. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    edge_lambda_gradgrads_3d[m][d1][d2] stores the 2nd derivatives of the edge extension parameters with respect to components d1 and d2 on edge $m$.

    Definition at line 381 of file fe_nedelec_sz.h.


    Storage for the face extension parameters. These are stored for the 6 faces such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    The face extension parameter of a face, $F$, defined by the vertices $v1$, $v2$, $v3$, $v4$ is given by $\lambda_{F} = \lambda_{v1} + \lambda_{v2} + \lambda_{v3} + \lambda_{v4}$.

    Note that under this definition, the values of $\lambda_{F}$ do not change with the orientation of the face.

    face_lambda_values[m][q] stores the face extension parameter value at the $q$-th quadrature point on face $m$.

    Definition at line 399 of file fe_nedelec_sz.h.


    Storage for gradients of face extension parameters. These are stored for the 6 faces such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

    face_lambda_grads[m][d] stores the gradient of the face extension parameters for component $d$ on face $m$.

    Definition at line 409 of file fe_nedelec_sz.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html differs (HTML document, ASCII text, with very long lines)

    class FE_Nothing< dim, spacedim >

    Definition of a finite element space with zero degrees of freedom and that, consequently, can only represent a single function: the zero function.

    This class is useful (in the context of an hp-method) to represent empty cells in the triangulation on which no degrees of freedom should be allocated, or to describe a field that is extended by zero to a part of the domain where we don't need it. Thus a triangulation may be divided into two regions: an active region where normal elements are used, and an inactive region where FE_Nothing elements are used. The DoFHandler will therefore assign no degrees of freedom to the FE_Nothing cells, and this subregion is therefore implicitly deleted from the computation. step-10 and step-46 show use cases for this element. An interesting application for this element is also presented in the paper [Cangiani2012].

    FE_Nothing as seen as a function space

    Finite elements are often best interpreted as forming a function space, i.e., a set of functions that form a vector space. One can indeed interpret FE_Nothing in this light: It corresponds to the function space $V_h=\{0\}$, i.e., the set of functions that are zero everywhere. (The constructor can take an argument that, if greater than one, extends the space to one of vector-valued functions with more than one component, with all components equal to zero everywhere.) Indeed, this is a vector space since every linear combination of elements in the vector space is also an element in the vector space, as is every multiple of the single element zero. It is obvious that the function space has no degrees of freedom, thus the name of the class.

    FE_Nothing in combination with other elements

    In situations such as those of step-46, one uses FE_Nothing on cells where one is not interested in a solution variable. For example, in fluid structure interaction problems, the fluid velocity is only defined on cells inside the fluid part of the domain. One then uses FE_Nothing on cells in the solid part of the domain to describe the finite element space for the velocity. In other words, the velocity lives everywhere conceptually, but it is identically zero in those parts of the domain where it is not of interest and doesn't use up any degrees of freedom there.

    The question is what happens at the interface between areas where one is interested in the solution (and uses a "normal" finite element) and where one is not interested (and uses FE_Nothing): Should the solution at that interface be zero – i.e., we consider a "continuous" finite element field that happens to be zero in that area where FE_Nothing is used – or is there no requirement for continuity at the interface. In the deal.II language, this is encoded by what the function FiniteElement::compare_for_domination() returns: If the FE_Nothing "dominates", then the solution must be zero at the interface; if it does not, then there is no requirement and one can think of FE_Nothing as a function space that is in general discontinuous (i.e., there is no requirement for any kind of continuity at cell interfaces) but on every cell equal to zero.
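
    A minimal sketch of the two-region setup described here, assuming an hp::FECollection with one vector-valued element per region (the concrete elements, dimensions, and component counts are illustrative assumptions):

#include <deal.II/fe/fe_nothing.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/hp/fe_collection.h>

using namespace dealii;

hp::FECollection<2> build_collection()
{
  hp::FECollection<2> fe_collection;

  // Index 0: the "active" region carries a real two-component field.
  fe_collection.push_back(FESystem<2>(FE_Q<2>(2), 2));

  // Index 1: the "inactive" region uses FE_Nothing with the same number of
  // components, so the two elements describe the same set of variables.
  fe_collection.push_back(FESystem<2>(FE_Nothing<2>(), 2));

  return fe_collection;
}

    One would then assign the appropriate index to each cell via cell->set_active_fe_index() before calling DoFHandler::distribute_dofs().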


    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    scalar    An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    component_mask    The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial has its support point physically located on a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    [in] cell_dof_index    The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html differs (HTML document, ASCII text, with very long lines)

    Detailed Description

    Implementation of the scalar version of the P1 nonconforming finite element, a piecewise linear element on quadrilaterals in 2d. This implementation is only for 2d cells in a 2d space (i.e., codimension 0).
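
    A minimal usage sketch (mesh and refinement level are arbitrary illustrative choices); note that the element takes no template argument since it only exists for 2d cells in 2d space:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_p1nc.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

#include <iostream>

int main()
{
  using namespace dealii;

  Triangulation<2> triangulation;
  GridGenerator::hyper_cube(triangulation);
  triangulation.refine_global(3);

  FE_P1NC fe; // scalar P1 nonconforming element on quadrilaterals
  DoFHandler<2> dof_handler(triangulation);
  dof_handler.distribute_dofs(fe);

  std::cout << "Number of DoFs: " << dof_handler.n_dofs() << std::endl;
}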

    Unlike the usual continuous, $H^1$ conforming finite elements, the P1 nonconforming element does not enforce continuity across edges. However, it requires continuity in an integral sense: any function in the space should have the same integral values on the two sides of the common edge shared by two adjacent elements.

    Thus, each function in the nonconforming element space can be discontinuous, and consequently not included in $H^1_0$, just like the basis functions in Discontinuous Galerkin (DG) finite element spaces. On the other hand, basis functions in DG spaces are completely discontinuous across edges without any relation between the values from both sides. This is a reason why usual weak formulations for DG schemes contain additional penalty terms for jump across edges to control discontinuity. However, nonconforming elements usually do not need additional terms in their weak formulations because their integrals along edges are the same from both sides, i.e., there is some level of continuity.

    Dice Rule

    Since any function in the P1 nonconforming space is piecewise linear on each element, the function value at the midpoint of each edge is the same as the mean value on the edge. Thus the continuity of the integral value across each edge is equivalent to the continuity of the midpoint value of each edge in this case.

    Thus for the P1 nonconforming element, the function values at midpoints on edges of a cell are important. The first attempt to define (local) degrees of freedom (DoFs) on a quadrilateral is by using midpoint values of a function.

    However, these 4 functionals are not linearly independent because a linear function in 2d is uniquely determined by only 3 independent values. A simple observation is that any linear function on a quadrilateral satisfies the 'dice rule': the sum of the two function values at the midpoints of the edge pair on opposite sides of a cell is equal to the sum of those at the midpoints of the other edge pair. This is called the 'dice rule' because the numbers of points on opposite sides of a die always add up to the same number as well (in the case of dice, to seven).

    In formulas, the dice rule is written as $\phi(m_0) + \phi(m_1) = \phi(m_2) + \phi(m_3)$ for all $\phi$ in the function space where $m_j$ is the midpoint of the edge $e_j$. Here, we assume the standard numbering convention for edges used in deal.II and described in class GeometryInfo.

    Conversely, if 4 values at the midpoints satisfying the dice rule are given, then there always exists a unique linear function that coincides with the 4 midpoint values.

    Due to the dice rule, the values at any three midpoints determine the value at the last midpoint. This means that the number of independent local functionals on a cell is 3, which is also the dimension of the space of linear polynomials on a cell in 2d.
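
    As a quick check of the dice rule, take $\phi(x,y) = a x + b y + c$ on the unit square, with the deal.II 2d edge ordering in which $e_0$ and $e_1$ are the left and right edges and $e_2$ and $e_3$ the bottom and top edges, so that $m_0=(0,\tfrac12)$, $m_1=(1,\tfrac12)$, $m_2=(\tfrac12,0)$, $m_3=(\tfrac12,1)$. Then

\[
\phi(m_0) + \phi(m_1)
= \left(\tfrac{b}{2} + c\right) + \left(a + \tfrac{b}{2} + c\right)
= a + b + 2c
= \left(\tfrac{a}{2} + c\right) + \left(\tfrac{a}{2} + b + c\right)
= \phi(m_2) + \phi(m_3).
\]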

    Shape functions

    [ASCII sketch of the reference cell: vertices 0 through 3 at the corners, with the four edge midpoints marked]

    For each vertex $v_j$ of a given cell, there are two edges of which $v_j$ is one of the end points. Consider a linear function such that it has value 0.5 at the midpoints of the two adjacent edges, and 0.0 at the midpoints of the other two edges. Note that the set of these values satisfies the dice rule described above. We denote such a function associated with vertex $v_j$ by $\phi_j$. Then the set of 4 shape functions is a partition of unity on a cell: $\sum_{j=0}^{3} \phi_j = 1$. (This is easy to see: at each edge midpoint, the sum of the four functions adds up to one because two of them have value 0.5 and the other two have value 0.0. Because the sum is globally linear, the only linear function with value 1 at the four midpoints is the one globally equal to one.)

    The following figures represent $\phi_j$ for $j=0,\cdots,3$ with their midpoint values:

    • shape function $\phi_0$: [ASCII sketch of midpoint values; 0.0 at the top edge midpoint]
    • shape function $\phi_1$: [ASCII sketch of midpoint values; 0.0 at the top edge midpoint]
    • shape function $\phi_2$: [ASCII sketch of midpoint values; 0.5 at the top edge midpoint]
    • shape function $\phi_3$: [ASCII sketch of midpoint values; 0.5 at the top edge midpoint]
    [in] support_point_values    An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    Return the coefficients of the 4 local linear shape functions $\phi_j(x,y) = a x + b y + c$ on the given cell. For each local shape function, the array consists of the three coefficients, in the order a, b, and c.

    Definition at line 89 of file fe_p1nc.cc.
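To see concretely what this returns, a small stand-alone sketch (a worked example assuming the standard deal.II vertex ordering and the unit cell, not the library routine itself) recovers $(a,b,c)$ for $\phi_0$ from three of its defining midpoint values; by the dice rule the fourth value is then automatic:

#include <array>
#include <cstdio>

// Recover (a,b,c) of phi_0(x,y) = a*x + b*y + c on the unit cell from its
// values at three edge midpoints: 0.5 at the left (0,1/2) and bottom (1/2,0)
// midpoints, 0.0 at the right (1,1/2) midpoint.
int main()
{
  // Rows: {x, y, 1, value} at the three midpoints.
  const std::array<std::array<double, 4>, 3> eq = {{{0.0, 0.5, 1.0, 0.5},
                                                    {0.5, 0.0, 1.0, 0.5},
                                                    {1.0, 0.5, 1.0, 0.0}}};

  // Solve the 3x3 system by Cramer's rule.
  const auto det3 = [](double m00, double m01, double m02,
                       double m10, double m11, double m12,
                       double m20, double m21, double m22) {
    return m00 * (m11 * m22 - m12 * m21) - m01 * (m10 * m22 - m12 * m20) +
           m02 * (m10 * m21 - m11 * m20);
  };
  const double d = det3(eq[0][0], eq[0][1], eq[0][2],
                        eq[1][0], eq[1][1], eq[1][2],
                        eq[2][0], eq[2][1], eq[2][2]);
  const double a = det3(eq[0][3], eq[0][1], eq[0][2],
                        eq[1][3], eq[1][1], eq[1][2],
                        eq[2][3], eq[2][1], eq[2][2]) / d;
  const double b = det3(eq[0][0], eq[0][3], eq[0][2],
                        eq[1][0], eq[1][3], eq[1][2],
                        eq[2][0], eq[2][3], eq[2][2]) / d;
  const double c = det3(eq[0][0], eq[0][1], eq[0][3],
                        eq[1][0], eq[1][1], eq[1][3],
                        eq[2][0], eq[2][1], eq[2][3]) / d;

  std::printf("phi_0: a=%g b=%g c=%g\n", a, b, c); // prints -0.5 -0.5 0.75
  return 0;
}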


Return a block mask with as many elements as this object has blocks, in which exactly the one entry is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
scalar: An object that represents a single scalar vector component of this finite element.
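For orientation, a small usage sketch (a hypothetical Stokes-like FESystem, not taken from this page): each FE_Q copy below forms its own block, so an extractor for the pressure component yields a valid block mask, whereas for an FE_RaviartThomas object the same call would throw, as the note explains:

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

int main()
{
  // Two velocity components plus one pressure; each base copy is a block.
  FESystem<2> fe(FE_Q<2>(2), 2, FE_Q<2>(1), 1);

  // The pressure (component index 2) coincides with the last block.
  const FEValuesExtractors::Scalar pressure(2);
  const BlockMask mask = fe.block_mask(pressure); // [false, false, true]
  return 0;
}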

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
component_mask: The mask that selects individual components of the finite element
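Continuing the previous sketch (illustrative; fe is the FESystem defined there), the ComponentMask-to-BlockMask conversion reads:

// Select the two velocity components, then convert the component mask
// into the corresponding block mask.
const FEValuesExtractors::Vector velocities(0);
const ComponentMask component_mask = fe.component_mask(velocities);
const BlockMask     block_mask     = fe.block_mask(component_mask);
// component_mask = [true, true, false]; block_mask = [true, true, false]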

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
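A short usage sketch (illustrative; the classifications in the comments follow from the discussion above):

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

using namespace dealii;

int main()
{
  FE_Q<3> fe(2); // Q_2 in 3d
  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    {
      const GeometryPrimitive primitive =
        fe.get_associated_geometry_primitive(i);
      // Vertex dofs report GeometryPrimitive::vertex, edge-midpoint dofs
      // GeometryPrimitive::line, face-center dofs GeometryPrimitive::quad,
      // and the cell-interior dof GeometryPrimitive::hex.
      (void)primitive;
    }
  return 0;
}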

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
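A minimal sketch of this interface (illustrative; the choice $f(x,y)=\sin(x)$ is arbitrary): evaluate the function at the generalized support points and apply the node functionals:

#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/vector.h>

#include <cmath>
#include <vector>

using namespace dealii;

int main()
{
  FE_Q<2> fe(2);
  const std::vector<Point<2>> &points = fe.get_generalized_support_points();

  // One value vector per support point, one entry per vector component.
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int i = 0; i < points.size(); ++i)
    support_point_values[i][0] = std::sin(points[i][0]);

  // nodal_values[i] = Psi_i[f], the expansion coefficients of the interpolant.
  std::vector<double> nodal_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, nodal_values);
  return 0;
}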

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html differs (HTML document, ASCII text, with very long lines)

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
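In index notation the correction is a contraction over $m$; a minimal stand-alone sketch with hypothetical nested-vector containers (not the library's internal data structures) could look as follows, and the third-derivative correction below follows the same pattern with additional contraction terms:

#include <vector>

// Hypothetical layout:
//   D[i][j][k]  : uncorrected Hessians (already pushed forward with J^{-1})
//   H[m][j][k]  : Jacobian pushed-forward derivative
//   grad[i][m]  : shape gradients d(phi_i)/d(x_m)
void correct_hessians(std::vector<std::vector<std::vector<double>>>       &D,
                      const std::vector<std::vector<std::vector<double>>> &H,
                      const std::vector<std::vector<double>>              &grad,
                      const unsigned int n_dofs,
                      const unsigned int dim)
{
  for (unsigned int i = 0; i < n_dofs; ++i)
    for (unsigned int j = 0; j < dim; ++j)
      for (unsigned int k = 0; k < dim; ++k)
        for (unsigned int m = 0; m < dim; ++m)
          D[i][j][k] -= H[m][j][k] * grad[i][m]; // D_ijk -= H_mjk dphi_i/dx_m
}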


    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m} - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m} - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html differs (HTML document, ASCII text, with very long lines)
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html differs (HTML document, ASCII text, with very long lines)

Similarly, in many cases, node functionals depend on the shape of the mesh cell, since they evaluate normal or tangential components on the faces. In order to allow for a set of transformations, the variable mapping_kind has been introduced. It needs to be set in the constructor of a derived class.

Any derived class must decide on the polynomial space to use. This polynomial space should be implemented simply as a set of vector-valued polynomials like PolynomialsBDM and PolynomialsRaviartThomas. In order to facilitate this implementation, it is of no importance to the current class which basis the polynomial space chooses – as described next, this class handles the transformation from the basis chosen by the polynomial space template argument to the basis we want to use for finite element computations internally.

    Determining the correct basis

In most cases, the basis used by the class that describes the polynomial space, $\{\tilde\varphi_j(\hat{\mathbf x})\}$, does not match the one we want to use for the finite element description, $\{\varphi_j(\hat{\mathbf x})\}$. Rather, we need to express the finite element shape functions as a linear combination of the basis provided by the polynomial space:

\begin{align*}
  \varphi_j = \sum_k c_{jk} \tilde\varphi_k.
\end{align*}


These expansion coefficients $c_{jk}$ are typically computed in the constructors of derived classes. To facilitate this, this class at first (unless told otherwise, see below) assumes that the shape functions should be exactly the ones provided by the polynomial space. In the constructor of the derived class, one then typically has code of the form

// Now compute the inverse node matrix, generating the correct
// basis functions from the raw ones. For a discussion of what
// exactly happens here, see FETools::compute_node_matrix.
const FullMatrix<double> M = FETools::compute_node_matrix(*this);
this->inverse_node_matrix.reinit(n_dofs, n_dofs);
this->inverse_node_matrix.invert(M);

The FETools::compute_node_matrix() function explains in more detail what exactly it computes, and how; in any case, the result is that inverse_node_matrix now contains the expansion coefficients $c_{jk}$, and the fact that this block of code now sets the matrix to a non-zero size indicates to the functions of the current class that it should from then on use the expanded basis, $\{\varphi_j(\hat{\mathbf x})\}$, and no longer the original, "raw" basis $\{\tilde\varphi_j(\hat{\mathbf x})\}$ when asked for values or derivatives of shape functions.

    In order for this scheme to work, it is important to ensure that the size of the inverse_node_matrix be zero at the time when FETools::compute_node_matrix() is called; thus, the call to this function cannot be inlined into the last line – the result of the call really does need to be stored in the temporary object M.

    Setting the transformation

In most cases, vector-valued basis functions must be transformed when mapped from the reference cell to the actual grid cell. These transformations can be selected from the set MappingKind and stored in mapping_kind. Therefore, each constructor should contain a line like:

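A plausible example of such a line (illustrative only; the appropriate MappingKind value depends on the element, here a Raviart-Thomas-type choice):

// Illustrative: select the transformation appropriate for the element.
this->mapping_kind = {MappingKind::mapping_raviart_thomas};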
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 2024-01-30 03:04:35.644741381 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 2024-01-30 03:04:35.648741414 +0000 @@ -700,11 +700,11 @@

(This hunk repeats the function description shown above verbatim; only the embedded formula images changed.)

    Parameters
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
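To make the input/output convention concrete, here is a minimal sketch. The documentation above never names the function; it appears to be deal.II's FiniteElement::convert_generalized_support_point_values_to_dof_values(), and the sketch assumes that interface. The choice of a scalar FE_Q element and the sampled function are illustrative assumptions.

#include <deal.II/base/point.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/vector.h>

#include <vector>

int main()
{
  // A scalar biquadratic element; its generalized support points
  // coincide with the usual Lagrange support points.
  const dealii::FE_Q<2> fe(2);

  const std::vector<dealii::Point<2>> &points =
    fe.get_generalized_support_points();

  // One entry per support point, each a vector with as many components
  // as the element has (here: one). We sample f(x) = x_0.
  std::vector<dealii::Vector<double>> support_point_values(
    points.size(), dealii::Vector<double>(fe.n_components()));
  for (unsigned int i = 0; i < points.size(); ++i)
    support_point_values[i][0] = points[i][0];

  // The output has dofs_per_cell entries: the nodal values Psi_i[f].
  std::vector<double> dof_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);

  // For a Lagrange element each node functional is a point evaluation,
  // so dof_values[i] equals f at the i-th support point.
}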
    @@ -1773,17 +1773,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
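The correction in the second formula is a single contraction over the repeated index $m$. The following is a sketch of that contraction using hypothetical plain arrays in place of deal.II's internal tensor types; the names are illustrative, not the library's.

#include <array>

constexpr unsigned int dim = 3;

template <typename T>
using Rank2 = std::array<std::array<T, dim>, dim>;
template <typename T>
using Rank3 = std::array<std::array<std::array<T, dim>, dim>, dim>;

// Apply d^2 phi/(dx_j dx_k) = D_{jk} - H_{mjk} dphi/dx_m in place:
// 'hessian' holds the uncorrected mapped Hessian D of one shape
// function, 'H' the Jacobian pushed-forward derivative, and 'grad'
// the mapped gradient of the same shape function.
void correct_hessian(Rank2<double>                 &hessian,
                     const Rank3<double>           &H,
                     const std::array<double, dim> &grad)
{
  for (unsigned int j = 0; j < dim; ++j)
    for (unsigned int k = 0; k < dim; ++k)
      for (unsigned int m = 0; m < dim; ++m)
        hessian[j][k] -= H[m][j][k] * grad[m];
}

int main()
{
  Rank2<double>           D{};    // uncorrected Hessian
  Rank3<double>           H{};    // pushed-forward Jacobian derivative
  std::array<double, dim> grad{}; // mapped shape gradient
  correct_hessian(D, H, grad);
}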

    @@ -1818,21 +1818,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.
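The same pattern extends to the third-derivative correction, now with three symmetrized Hessian terms and one gradient term. Again a sketch over hypothetical plain arrays rather than the library's internal types:

#include <array>

constexpr unsigned int dim = 3;

template <typename T>
using Rank2 = std::array<std::array<T, dim>, dim>;
template <typename T>
using Rank3 = std::array<std::array<std::array<T, dim>, dim>, dim>;
template <typename T>
using Rank4 = std::array<Rank3<T>, dim>;

// Apply, in place,
//   d^3 phi/(dx_j dx_k dx_l) = D_{jkl}
//     - H_{mjl} d^2 phi/(dx_k dx_m) - H_{mkl} d^2 phi/(dx_j dx_m)
//     - H_{mjk} d^2 phi/(dx_l dx_m) - K_{mjkl} dphi/dx_m.
void correct_third_derivative(Rank3<double>                 &third,
                              const Rank3<double>           &H,
                              const Rank4<double>           &K,
                              const Rank2<double>           &hessian,
                              const std::array<double, dim> &grad)
{
  for (unsigned int j = 0; j < dim; ++j)
    for (unsigned int k = 0; k < dim; ++k)
      for (unsigned int l = 0; l < dim; ++l)
        for (unsigned int m = 0; m < dim; ++m)
          third[j][k][l] -= H[m][j][l] * hessian[k][m] +
                            H[m][k][l] * hessian[j][m] +
                            H[m][j][k] * hessian[l][m] +
                            K[m][j][k][l] * grad[m];
}

int main()
{
  Rank3<double>           D{}, H{};
  Rank4<double>           K{};
  Rank2<double>           hess{};
  std::array<double, dim> grad{};
  correct_third_derivative(D, H, K, hess, grad);
}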

    @@ -3402,7 +3402,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
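A minimal sketch of the succeeding case, assuming an illustrative Stokes-like FESystem in which every scalar component is a complete block; for an FE_RaviartThomas element the same call would throw, as the note says.

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

int main()
{
  // Two velocity components plus one pressure component; each copy of
  // a base element forms its own block, so a single scalar component
  // coincides with a complete block and the call below succeeds.
  const dealii::FESystem<2> fe(dealii::FE_Q<2>(2), 2,
                               dealii::FE_Q<2>(1), 1);

  const dealii::FEValuesExtractors::Scalar x_velocity(0);
  const dealii::BlockMask mask = fe.block_mask(x_velocity);
  // mask has three entries: [true, false, false].
}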
    @@ -3510,7 +3510,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
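The conversion path through a ComponentMask, sketched for the same kind of illustrative FESystem as above:

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

int main()
{
  const dealii::FESystem<2> fe(dealii::FE_Q<2>(2), 2,  // velocities
                               dealii::FE_Q<2>(1), 1); // pressure

  // The two velocity components cover the first two blocks completely,
  // so converting the component mask to a block mask is well defined.
  const dealii::FEValuesExtractors::Vector velocities(0);
  const dealii::ComponentMask cmask = fe.component_mask(velocities);
  const dealii::BlockMask     bmask = fe.block_mask(cmask);
  // bmask reads [true, true, false].
}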
    @@ -3820,9 +3820,9 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
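A short sketch of querying this association; the element choice is illustrative.

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

int main()
{
  // Q2 in 3d: DoFs sit on vertices, edge midpoints, face centers, and
  // the cell center.
  const dealii::FE_Q<3> fe(2);

  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    {
      const dealii::GeometryPrimitive primitive =
        fe.get_associated_geometry_primitive(i);
      // primitive.get_dimension() is 0 (vertex), 1 (line), 2 (quad),
      // or 3 (hex), matching the description above.
      (void)primitive;
    }
}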
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 2024-01-30 03:04:35.784742547 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 2024-01-30 03:04:35.788742581 +0000 @@ -837,11 +837,11 @@

(This hunk repeats the function description shown above verbatim; only the embedded formula images changed.)

    Parameters
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    @@ -1910,17 +1910,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

(Same formulas and text as shown above; only the embedded formula images changed.)

    @@ -1955,21 +1955,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

(Same formulas and text as shown above; only the embedded formula images changed.)

    @@ -3418,7 +3418,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

(Same note as above; only the embedded formula image changed.)
    Parameters
    @@ -3526,7 +3526,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

(Same note as above; only the embedded formula image changed.)
    Parameters
    @@ -3836,9 +3836,9 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

(Same three explanatory paragraphs as above; only the embedded formula images changed.)

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 2024-01-30 03:04:35.928743747 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 2024-01-30 03:04:35.932743780 +0000 @@ -642,11 +642,11 @@

(This hunk repeats the function description shown above verbatim; only the embedded formula images changed.)

    Parameters
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
    @@ -1715,17 +1715,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

(Same formulas and text as shown above; only the embedded formula images changed.)

    @@ -1760,21 +1760,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

(Same formulas and text as shown above; only the embedded formula images changed.)

    @@ -3406,7 +3406,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

(Same note as above; only the embedded formula image changed.)
    Parameters
    @@ -3514,7 +3514,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

(Same note as above; only the embedded formula image changed.)
    Parameters
    @@ -3824,9 +3824,9 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

(Same three explanatory paragraphs as above; only the embedded formula images changed.)

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 2024-01-30 03:04:36.080745014 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 2024-01-30 03:04:36.084745047 +0000 @@ -484,7 +484,7 @@

    The constructor creates a TensorProductPolynomials object that includes the tensor product of LagrangeEquidistant polynomials of degree p. This TensorProductPolynomials object provides all values and derivatives of the shape functions. In case a quadrature rule is given, the constructor creates a TensorProductPolynomials object that includes the tensor product of Lagrange polynomials with the support points from points.

Furthermore, the constructor fills the interface_constraints, the prolongation (embedding), and the restriction matrices. These are implemented only up to a certain degree and may not be available for very high polynomial degree.

    Unit support point distribution and conditioning of interpolation

When constructing an FE_Q element at polynomial degree one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points $x_i$ are those points where the $j$th Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points become increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

The Gauss-Lobatto points in 1d include the end points 0 and 1 of the unit interval. The interior points are shifted towards the end points, which gives a denser point distribution close to the element boundary.

    If combined with Gauss-Lobatto quadrature, FE_Q based on the default support points gives diagonal mass matrices. This case is demonstrated in step-48. However, this element can be combined with arbitrary quadrature rules through the usual FEValues approach, including full Gauss quadrature. In the general case, the mass matrix is non-diagonal.
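A sketch of the two construction paths this implies; the degree is an illustrative choice.

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/fe/fe_q.h>

int main()
{
  // Default: a degree-4 element whose support points are those of the
  // 5-point Gauss-Lobatto rule, for well-conditioned interpolation.
  const dealii::FE_Q<2> fe_default(4);

  // Equivalent explicit form: hand the 1d support points to the
  // constructor as a quadrature rule (degree = n_points - 1).
  const dealii::FE_Q<2> fe_gauss_lobatto(dealii::QGaussLobatto<1>(4 + 1));
}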

    Numbering of the degrees of freedom (DoFs)

[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

[Image gallery: plots of the $Q_2$ element shape functions 0 through 8, and of the $Q_4$ element shape functions 0 through 21; only the embedded images changed.]
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 2024-01-30 03:04:36.228746247 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 2024-01-30 03:04:36.228746247 +0000 @@ -2287,17 +2287,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

(Same formulas and text as shown above; only the embedded formula images changed.)

    @@ -2332,21 +2332,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

(Same formulas and text as shown above; only the embedded formula images changed.)

    @@ -3587,7 +3587,7 @@

Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

(Same note as above; only the embedded formula image changed.)
    Parameters
    @@ -3695,7 +3695,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

(Same note as above; only the embedded formula image changed.)
    Parameters
    @@ -3974,9 +3974,9 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial has its support point physically located on a line bounding a cell, but its shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
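An added usage sketch (not part of the original page; the element choice and loop are illustrative, and get_associated_geometry_primitive is the member whose behavior is documented here):

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

using namespace dealii;

void geometry_primitive_example()
{
  // For a Q2 element in 3d: vertex DoFs report GeometryPrimitive::vertex,
  // edge-midpoint DoFs GeometryPrimitive::line, face-center DoFs
  // GeometryPrimitive::quad, and the cell-center DoF GeometryPrimitive::hex.
  const FE_Q<3> fe(2);
  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    {
      const GeometryPrimitive primitive =
        fe.get_associated_geometry_primitive(i);
      (void)primitive; // e.g., branch on primitive.get_dimension() here
    }
}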

    Parameters
    @@ -4011,11 +4011,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0, dofs_per_cell).
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
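A minimal calling sketch (added; the scalar element and the sample function are assumptions):

#include <deal.II/base/function_lib.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/vector.h>

#include <vector>

using namespace dealii;

void node_functional_example()
{
  // Hypothetical use: compute the nodal values Psi_i[f] of a given
  // function f for a scalar Q2 element, i.e. the coefficients of the
  // finite element interpolant of f on the reference cell.
  const FE_Q<2>                fe(2);
  Functions::CosineFunction<2> f; // any function with enough components

  const std::vector<Point<2>> &points = fe.get_generalized_support_points();

  // One value vector per support point, one entry per vector component.
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int i = 0; i < points.size(); ++i)
    support_point_values[i][0] = f.value(points[i]);

  // Apply the node functionals: the output are the expansion
  // coefficients of the interpolant, one per degree of freedom.
  std::vector<double> dof_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
}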

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html 2024-01-30 03:04:36.372747446 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html 2024-01-30 03:04:36.372747446 +0000 @@ -485,17 +485,17 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Detailed Description

    template<int dim, int spacedim = dim>
class FE_Q_Bubbles< dim, spacedim >

Implementation of a scalar Lagrange finite element $Q_p^+$ that yields the finite element space of continuous, piecewise polynomials of degree p in each coordinate direction, plus some (non-normalized) bubble enrichment space spanned by the additional shape functions $\varphi_j(\mathbf x) = 2^{p-1}\left(x_j-\frac 12\right)^{p-1} \left[\prod_{i=0}^{dim-1}(x_i(1-x_i))\right]$ for $j=0,\ldots,dim-1$. If $p$ is one, the first factor disappears and one obtains the usual bubble function centered at the midpoint of the cell. Because these last shape functions have polynomial degree $p+1$, the overall polynomial degree of the shape functions in the space described by this class is $p+1$.

    This class is realized using tensor product polynomials based on equidistant or given support points, in the same way as one can provide support points to the FE_Q class's constructors.

    For more information about the spacedim template parameter check the documentation of the FiniteElement class, or the one of Triangulation.

Due to the fact that the enrichments are small almost everywhere for large $p$, the condition number of the mass and stiffness matrices quickly increases with increasing $p$. Below you see a comparison with FE_Q(QGaussLobatto(p+1)) for dim=1.

Therefore, this element should be used with care for $p>3$.
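For illustration (an added sketch, not from the original page), constructing such an enriched element:

#include <deal.II/fe/fe_q_bubbles.h>

using namespace dealii;

void bubble_element_example()
{
  // A Q1 element enriched with bubble functions: p = 1, so the overall
  // polynomial degree of the shape functions is p + 1 = 2. For p = 1 the
  // dim candidate bubbles coincide in the single classical bubble, so in
  // 2d the element has the 4 vertex DoFs plus one bubble DoF.
  const FE_Q_Bubbles<2> fe(1);
  // Given the conditioning caveat above, moderate degrees (p <= 3) are
  // the intended use case.
}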

    Implementation

    The constructor creates a TensorProductPolynomials object that includes the tensor product of LagrangeEquidistant polynomials of degree p plus the bubble enrichments. This TensorProductPolynomialsBubbles object provides all values and derivatives of the shape functions. In case a quadrature rule is given, the constructor creates a TensorProductPolynomialsBubbles object that includes the tensor product of Lagrange polynomials with the support points from points and the bubble enrichments as defined above.

Furthermore, the constructor fills the interface_constraints, the prolongation (embedding) and the restriction matrices.

    @@ -714,11 +714,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2497,17 +2497,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
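(The following consistency check is added here and is not part of the original page; it uses the same loose index notation as above and writes $H_{mjk} = \frac{d^2 x_m}{d \hat x_M d \hat x_K} (J_{jM})^{-1} (J_{kK})^{-1}$ for the Jacobian pushed-forward gradient.) The correction term can be read off from the chain rule:

\[
\frac{d^2 \phi_i}{d x_j d x_k}
= \frac{d}{d x_k}\left[\frac{d \hat\phi_i}{d \hat x_J} (J_{jJ})^{-1}\right]
= \underbrace{\frac{d^2 \hat\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1}}_{D_{ijk}}
+ \frac{d \hat\phi_i}{d \hat x_J} \frac{d (J_{jJ})^{-1}}{d x_k},
\]

and since $\frac{d (J_{jJ})^{-1}}{d x_k} = -(J_{mJ})^{-1} H_{mjk}$, the last term equals $-H_{mjk} \frac{d \phi_i}{d x_m}$, which is exactly the subtraction performed by this function.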

    @@ -2542,21 +2542,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3704,7 +3704,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one element that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3812,7 +3812,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4091,9 +4091,9 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial has its support point physically located on a line bounding a cell, but its shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 2024-01-30 03:04:36.516748646 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 2024-01-30 03:04:36.516748646 +0000 @@ -885,11 +885,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0, dofs_per_cell).
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2666,17 +2666,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2711,21 +2711,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3873,7 +3873,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one element that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3981,7 +3981,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4260,9 +4260,9 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial has its support point physically located on a line bounding a cell, but its shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 2024-01-30 03:04:36.676749979 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 2024-01-30 03:04:36.680750013 +0000 @@ -520,7 +520,7 @@

    Numbering of the degrees of freedom (DoFs)

The original ordering of the shape functions represented by the TensorProductPolynomials is a tensor product numbering. However, the shape functions on a cell are renumbered beginning with the shape functions whose support points are at the vertices, then on the lines, on the quads, and finally (for 3d) on the hexes. To be explicit, these numberings are listed in the following:

    Q1 elements

The $Q_1^H$ element is of polynomial degree one and, consequently, is exactly the same as the $Q_1$ element in class FE_Q. In particular, the shape functions are defined in exactly the same way:

    • 1d case:

0-------1
[Plots of the $Q_1^H$ element shape functions 0 through 3.]
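As an added aside (not in the original page), the degree-one coincidence noted above can be checked directly:

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_q_hierarchical.h>

using namespace dealii;

void hierarchical_q1_example()
{
  // For polynomial degree one, the hierarchical element spans the same
  // space as FE_Q(1): four vertex shape functions on a 2d cell.
  const FE_Q_Hierarchical<2> fe_h(1);
  const FE_Q<2>              fe_q(1);
  // Both report 4 DoFs per cell, all logically associated with vertices.
  Assert(fe_h.n_dofs_per_cell() == fe_q.n_dofs_per_cell(),
         ExcInternalError());
}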

    Q2 elements

[Plots of the $Q_2^H$ element shape functions 0 through 8.]

[Plots of the $Q_3^H$ element shape functions 0 through 15.]

    Q4 elements

[Plots of the $Q_4^H$ element shape functions 0 and 1.]

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 2024-01-30 03:04:36.820751179 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 2024-01-30 03:04:36.824751212 +0000 @@ -487,7 +487,7 @@

• Stokes/Navier Stokes systems such as the one discussed in step-22 could be solved with Q2-iso-Q1 elements for velocities instead of $Q_2$ elements. Combined with $Q_1$ pressures they give a stable mixed element pair. However, they perform worse than the standard (Taylor-Hood $Q_2\times Q_1$) approach in most situations. (See, for example, [Boffi2011].) This combination of subdivided elements for the velocity and non-subdivided elements for the pressure is sometimes called the "Bercovier-Pironneau element" and dates back to around the same time as the Taylor-Hood element (namely, the mid-1970s). For more information, see the paper by Bercovier and Pironneau from 1979 [Bercovier1979], and for the origins of the comparable Taylor-Hood element see [Taylor73] from 1973.
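An added construction sketch of this pair (the wrapper function and names are illustrative assumptions):

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_q_iso_q1.h>
#include <deal.II/fe/fe_system.h>

using namespace dealii;

void bercovier_pironneau_example()
{
  // Hypothetical "Bercovier-Pironneau" Stokes element: Q2-iso-Q1
  // (piecewise Q1 on two subdivisions per direction) for the two
  // velocity components, combined with a standard Q1 pressure.
  FESystem<2> fe(FE_Q_iso_Q1<2>(2), 2,  // velocities
                 FE_Q<2>(1), 1);        // pressure
  // Per the text above, this pair is stable but usually performs worse
  // than the Taylor-Hood pair FESystem<2>(FE_Q<2>(2), 2, FE_Q<2>(1), 1).
}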

@@ -2444,17 +2444,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2489,21 +2489,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3651,7 +3651,7 @@

Return a block mask with as many elements as this object has blocks, in which exactly the one element that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3759,7 +3759,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -4038,9 +4038,9 @@
component_mask: The mask that selects individual components of the finite element.

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial has its support point physically located on a line bounding a cell, but its shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 2024-01-30 03:04:36.972752445 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 2024-01-30 03:04:36.972752445 +0000 @@ -493,12 +493,12 @@
      template<int dim>
      class FE_RT_Bubbles< dim >

This class implements a curl-enhanced Raviart-Thomas element, conforming with the Hdiv space. The node functionals are defined as point values at Gauss-Lobatto points. These elements generate vector fields whose normal components are continuous between mesh cells. The purpose of this finite element is to localize the interactions between degrees of freedom around the nodes when an appropriate quadrature rule is used, leading to a block-diagonal mass matrix (even with a full-tensor coefficient).

      The elements are defined through enrichment of classical Raviart-Thomas elements with extra curls, so that the Hdiv conformity is preserved, and the total number of degrees of freedom of FE_RT_Bubbles of order k is equal to the number of DoFs in dim copies of FE_Q of order k.

Note
Unlike Raviart-Thomas, the lowest possible order for this enhanced finite element is 1, i.e. $k \ge 1$.

The matching pressure space for FE_RT_Bubbles of order k is FE_DGQ of order k-1. With exact integration, this pair yields $(k+1)$-st order of convergence in the $L_2$-norm for the vector variable and $k$-th order in the $L_2$-norm for the scalar one (the same as $BDM_k \times P_{k-1}$).
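
To make the pairing concrete, here is a minimal sketch (not part of the upstream documentation; the order k = 2 and dim = 2 are arbitrary choices) of combining the two spaces in one FESystem:

#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_rt_bubbles.h>
#include <deal.II/fe/fe_system.h>

using namespace dealii;

int main()
{
  constexpr int      dim = 2;
  const unsigned int k   = 2; // arbitrary choice, k >= 1

  // Velocity space FE_RT_Bubbles(k) paired with its matching pressure
  // space FE_DGQ(k-1) in a single mixed element.
  const FESystem<dim> fe(FE_RT_Bubbles<dim>(k), 1, FE_DGQ<dim>(k - 1), 1);
}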

For this enhanced Raviart-Thomas element, the node values are not cell and face moments with respect to certain polynomials, but the values at Gauss-Lobatto quadrature points. The nodal values on edges (faces in 3d) are evaluated first, according to the natural ordering of the edges (faces) of a cell. The interior degrees of freedom are evaluated last.

For an RT-Bubbles element of degree k, we choose $(k+1)^{dim-1}$ Gauss-Lobatto points on each face. These points are ordered lexicographically with respect to the orientation of the face. In the interior of the cells, the values are computed using an anisotropic Gauss-Lobatto formula for integration. The mass matrix, assembled with this same quadrature rule, is block-diagonal with blocks corresponding to quadrature points. See "Higher order multipoint flux mixed finite element methods on quadrilaterals and hexahedra" for more details.
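
The following sketch illustrates the quadrature choice just described; taking degree + 1 Gauss-Lobatto points per direction is an assumption made for this illustration, not a statement of the library's internal rule:

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/fe/fe_rt_bubbles.h>
#include <deal.II/fe/fe_values.h>

using namespace dealii;

int main()
{
  constexpr int dim = 2;
  FE_RT_Bubbles<dim> fe(1);

  // A Gauss-Lobatto rule intended to collocate quadrature points with
  // the element's nodal points (the point count is an assumption).
  const QGaussLobatto<dim> quadrature(fe.degree + 1);

  FEValues<dim> fe_values(fe, quadrature,
                          update_values | update_JxW_values);
  // Assembling cell mass matrices through fe_values with this rule is
  // what yields the block-diagonal structure described above.
}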

The elements of degree $k=3$ in 2d and $k=2$ in 3d are shown in the figures below (filled arrows indicate DoFs for which continuity across the edges (faces in 3d) is required).

[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0, dofs_per_cell).
      @@ -733,11 +733,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
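
As a hedged illustration of this interface (the constant field f and the element choice are arbitrary choices for this sketch; the function names are the library's own):

#include <deal.II/fe/fe_rt_bubbles.h>
#include <deal.II/lac/vector.h>

#include <vector>

using namespace dealii;

int main()
{
  constexpr int dim = 2;
  FE_RT_Bubbles<dim> fe(1);

  // Values of a (here: constant) vector field f at the generalized
  // support points of the reference cell; one Vector per point, with
  // one entry per vector component of the element.
  const auto &points = fe.get_generalized_support_points();
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (auto &value : support_point_values)
    value = 1.0; // f(x) = (1, 1)

  // Applying the node functionals Psi_i yields the expansion
  // coefficients of the interpolant f_h = sum_i Psi_i[f] phi_i.
  std::vector<double> dof_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
}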

      Parameters
      @@ -3442,7 +3442,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, of which exactly one component is true: the one that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
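
A minimal sketch of the successful case (the Taylor-Hood-like system and the extractor index are choices made for this illustration):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

int main()
{
  constexpr int dim = 2;
  // dim velocity components plus 1 pressure, giving dim+1 blocks
  // (one block per base-element copy).
  const FESystem<dim> fe(FE_Q<dim>(2), dim, FE_Q<dim>(1), 1);

  // The pressure is a scalar that coincides with a complete block,
  // so the conversion succeeds.
  const FEValuesExtractors::Scalar pressure(dim);
  const BlockMask block_mask = fe.block_mask(pressure);
  // block_mask is [false, false, true] for dim == 2
}
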
      Parameters
      @@ -3550,7 +3550,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
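
And the analogous conversion starting from a component mask (again a sketch, under the same assumed system as before):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

int main()
{
  constexpr int dim = 2;
  const FESystem<dim> fe(FE_Q<dim>(2), dim, FE_Q<dim>(1), 1);

  // Select all velocity components; they cover complete blocks, so the
  // conversion from ComponentMask to BlockMask is well defined.
  const FEValuesExtractors::Vector velocities(0);
  const ComponentMask component_mask = fe.component_mask(velocities);
  const BlockMask     block_mask     = fe.block_mask(component_mask);
  // block_mask is [true, true, false] for dim == 2
}
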
      Parameters
      @@ -3860,9 +3860,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
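
A short sketch of querying this association (the element, Q2 in 3d, matches the example above; the output wording is illustrative):

#include <deal.II/fe/fe_q.h>

#include <iostream>

using namespace dealii;

int main()
{
  // For Q2 in 3d: vertex DoFs report dimension 0 (vertex), edge-midpoint
  // DoFs dimension 1 (line), face-center DoFs dimension 2 (quad), and
  // the cell-interior DoF dimension 3 (hex).
  FE_Q<3> fe(2);
  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    std::cout << "DoF " << i << " is associated with an object of dimension "
              << fe.get_associated_geometry_primitive(i).get_dimension()
              << '\n';
}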

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html 2024-01-30 03:04:37.120753678 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html 2024-01-30 03:04:37.120753678 +0000 @@ -480,7 +480,7 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0, dofs_per_cell).

      Detailed Description

      template<int dim>
class FE_RannacherTurek< dim >

Implementation of the Rannacher-Turek element. This element is used to generate a stable pair of function spaces for the Stokes equation without having to increase the polynomial degree of the velocity space as much as one would do for the stable Taylor-Hood element which uses the $Q_2^d\times Q_1$ pair for velocity and pressure. That said, like many other non-conforming elements, it can also be used for the discretization of the Laplace equation. The element was first described in R. Rannacher and S. Turek: "Simple non-conforming quadrilateral Stokes element", Numerical Methods for Partial Differential Equations, vol. 8, pp. 97-112, 1992.

      The shape functions generated by this element are in general discontinuous, and consequently the element is not $H^1$ conforming (i.e., it is a "non-conforming" element). However, the shape functions are constructed in such a way that the jump along faces has mean value zero, and consequently there is some sort of conformity in the element: a conforming element would have a pointwise zero jump, a completely discontinuous element like the FE_DGQ elements can have entirely arbitrary values for the jump across a face, and the current element is somewhere in the middle because its jump is nonzero but at least has mean value zero.

      The element is currently implemented only in dimension 2, for the lowest polynomial order, and without hanging nodes and restriction/prolongation.
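
A minimal usage sketch consistent with these restrictions (the defaulted constructor arguments are the library's; choosing them explicitly here is only for illustration):

#include <deal.II/fe/fe_rannacher_turek.h>

using namespace dealii;

int main()
{
  // Only dim == 2 and the lowest order are available, so the default
  // constructor covers the implemented case.
  const FE_RannacherTurek<2> fe; // order 0
}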

      Interpolation

      @@ -714,11 +714,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1847,17 +1847,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J \, d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j \, d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
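
For context, a one-line chain-rule computation (not part of the original text, but following from the definitions above) shows where the correction term comes from: differentiating $\frac{d\phi_i}{d x_j} = \frac{d\hat\phi_i}{d\hat x_J}(J_{jJ})^{-1}$ once more also hits the spatially varying inverse Jacobian,

\[
\frac{d^2 \phi_i}{d x_j \, d x_k}
= \frac{d}{d x_k}\left(\frac{d \hat\phi_i}{d \hat x_J}\,(J_{jJ})^{-1}\right)
= \underbrace{\frac{d^2 \hat\phi_i}{d \hat x_J \, d \hat x_K}(J_{jJ})^{-1}(J_{kK})^{-1}}_{D_{ijk}}
+ \frac{d \hat\phi_i}{d \hat x_J}\,\frac{d (J_{jJ})^{-1}}{d x_k},
\]

and the last term can be rewritten as $-H_{mjk}\,\frac{d \phi_i}{d x_m}$ using $\frac{d (J^{-1})}{d x_k} = -J^{-1}\,\frac{d J}{d x_k}\,J^{-1}$, which is exactly the subtracted correction.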

      @@ -1892,21 +1892,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J \, d \hat x_K \, d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j \, d x_k \, d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k \, d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j \, d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l \, d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3476,7 +3476,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, of which exactly one component is true: the one that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3584,7 +3584,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3894,9 +3894,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 2024-01-30 03:04:37.272754945 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 2024-01-30 03:04:37.288755078 +0000 @@ -503,11 +503,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0, dofs_per_cell).

      Detailed Description

      template<int dim>
class FE_RaviartThomas< dim >

Implementation of Raviart-Thomas (RT) elements. The Raviart-Thomas space is designed to solve problems in which the solution only lives in the space $H^\text{div}=\{ {\mathbf u} \in L_2: \text{div}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose divergence is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the mixed formulation of the Laplace equation and related situations, see for example step-20. The defining characteristic of functions in $H^\text{div}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the normal component of the vector field must be continuous across the line (or surface) even though the tangential component may not be. As a consequence, the Raviart-Thomas element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the normal component of the vector field represented by each shape function is continuous across the faces of cells.

Other properties of the Raviart-Thomas element are that (i) it is not a primitive element; (ii) the shape functions are defined so that certain integrals over the faces are either zero or one, rather than the common case of certain point values being either zero or one. (There is, however, the FE_RaviartThomasNodal element that uses point values.)

      We follow the commonly used – though confusing – definition of the "degree" of RT elements. Specifically, the "degree" of the element denotes the polynomial degree of the largest complete polynomial subspace contained in the finite element space, even if the space may contain shape functions of higher polynomial degree. The lowest order element is consequently FE_RaviartThomas(0), i.e., the Raviart-Thomas element "of degree zero", even though the functions of this space are in general polynomials of degree one in each variable. This choice of "degree" implies that the approximation order of the function itself is degree+1, as with usual polynomial spaces. The numbering so chosen implies the sequence

\[
Q_{k+1}
\stackrel{\text{grad}}{\rightarrow}
\text{Nedelec}_k
\stackrel{\text{curl}}{\rightarrow}
\text{RaviartThomas}_k
\stackrel{\text{div}}{\rightarrow}
DGQ_{k}
\]

      This class is not implemented for the codimension one case (spacedim != dim).
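
As a minimal sketch of the element pairing used in the mixed formulation mentioned above (in the spirit of step-20; the dimension is an arbitrary choice):

#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/fe/fe_system.h>

using namespace dealii;

int main()
{
  constexpr int dim = 2;
  // The lowest-order Raviart-Thomas element ("degree zero") paired with
  // piecewise constants for the pressure, as in the mixed formulation
  // of the Laplace equation.
  const FESystem<dim> fe(FE_RaviartThomas<dim>(0), 1, FE_DGQ<dim>(0), 1);
}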

      Interpolation

      @@ -782,11 +782,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -985,7 +985,7 @@

Fill the necessary tables defined in base classes such as adjust_quad_dof_index_for_face_orientation_table declared in fe.cc. We need to fill it with the correct values in case of non-standard, flipped (rotated by +180 degrees) or rotated (rotated by +90 degrees) faces. These are given in the form of three flags (face_orientation, face_flip, face_rotation); see the documentation in GeometryInfo<dim> and this glossary entry on face orientation.

      Example: Raviart-Thomas Elements of order 2 (tensor polynomial degree 3)

The dofs on a face are connected to an $n\times n$ matrix, where here $n=3$. In our example we can imagine the following dofs on a quad (face):

 ___________
|           |
|  6  7  8  |
      @@ -3555,7 +3555,7 @@
       
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, of which exactly one component is true: the one that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3663,7 +3663,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3942,9 +3942,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 2024-01-30 03:04:37.436756311 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 2024-01-30 03:04:37.436756311 +0000 @@ -488,8 +488,8 @@ class FE_RaviartThomasNodal< dim >

      The Raviart-Thomas elements with node functionals defined as point values in Gauss-Lobatto points.

      Description of node values

For this Raviart-Thomas element, the node values are not cell and face moments with respect to certain polynomials, but the values at quadrature points. Following the general scheme for numbering degrees of freedom, the node values on faces (edges in 2d, quads in 3d) come first, face by face, according to the natural ordering of the faces of a cell. The interior degrees of freedom come last.

For an RT-element of degree k, we choose $(k+1)^{d-1}$ Gauss-Lobatto points on each face, as defined by QGaussLobatto. For degree $k=0$, the midpoint is chosen. These points are ordered lexicographically with respect to the orientation of the face. This way, the normal component, which is in $Q_k$, is uniquely determined.

These face polynomials are extended into the interior by means of a QGaussLobatto formula for the normal direction. In other words, the polynomials are the tensor product of Lagrange polynomials on the points of a QGaussLobatto formula with $(k+2)$ points in the normal direction with Lagrange polynomials on the points of a QGaussLobatto quadrature formula with $(k+1)$ points.

      Note
      The degree stored in the member variable FiniteElementData<dim>::degree is higher by one than the constructor argument!
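
A short sketch of this off-by-one convention (the order k = 1 is an arbitrary choice):

#include <deal.II/fe/fe_raviart_thomas.h>

#include <cassert>

using namespace dealii;

int main()
{
  // The constructor argument is the RT "degree" k; the polynomial
  // degree stored in FiniteElementData is k+1, as noted above.
  const FE_RaviartThomasNodal<2> fe(1);
  assert(fe.degree == 2);
}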

      Definition at line 336 of file fe_raviart_thomas.h.

      @@ -787,11 +787,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0, dofs_per_cell).
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.
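To make this concrete, the following is a minimal, self-contained sketch of how the conversion is typically driven from user code. The element choice and the interpolated function are illustrative assumptions; the two calls shown, get_generalized_support_points() and convert_generalized_support_point_values_to_dof_values(), are the interface documented here.

#include <deal.II/base/point.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/lac/vector.h>

#include <vector>

using namespace dealii;

int main()
{
  // An element based on generalized support points (illustrative choice).
  const FE_RaviartThomas<2> fe(1);

  // Evaluate a vector-valued function f at the generalized support points;
  // each entry has as many components as the element (here: 2).
  const std::vector<Point<2>> &points = fe.get_generalized_support_points();
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int q = 0; q < points.size(); ++q)
    {
      support_point_values[q][0] = points[q][1];  // f_x = y  (made-up function)
      support_point_values[q][1] = -points[q][0]; // f_y = -x (made-up function)
    }

  // Apply the node functionals: the results are the expansion coefficients
  // Psi_i[f] of the interpolant f_h = sum_i Psi_i[f] phi_i.
  std::vector<double> dof_values(fe.dofs_per_cell);
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
}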

      Parameters
      @@ -3560,7 +3560,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one component that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3668,7 +3668,7 @@
scalar: An object that represents a single scalar vector component of this finite element.
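As a brief sketch (the two-field element below is an assumption, not something taken from this page): the call succeeds precisely when the selected scalar is a complete block by itself.

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

int main()
{
  // A hypothetical element with two scalar blocks: a Q2 field and a Q1 field.
  const FESystem<2> fe(FE_Q<2>(2), 1, FE_Q<2>(1), 1);

  // The second scalar component coincides with the second block, so a block
  // mask exists and the conversion succeeds; for a single component of an
  // FE_RaviartThomas it would throw instead, as the note above explains.
  const FEValuesExtractors::Scalar second_field(1);
  const BlockMask mask = fe.block_mask(second_field);
}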

Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3978,9 +3978,9 @@
component_mask: The mask that selects individual components of the finite element
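A similar hedged sketch for the ComponentMask-to-BlockMask conversion, again with an assumed vector-valued element; taking the component mask from an extractor for the whole velocity guarantees that complete blocks are selected.

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

int main()
{
  // Hypothetical 2d Stokes-like element: two velocity components (Q2) plus
  // a scalar pressure (Q1).
  const FESystem<2> fe(FE_Q<2>(2), 2, FE_Q<2>(1), 1);

  // All velocity components together span complete blocks, so the
  // conversion from ComponentMask to BlockMask is well defined.
  const FEValuesExtractors::Vector velocities(0);
  const ComponentMask component_mask = fe.component_mask(velocities);
  const BlockMask     block_mask     = fe.block_mask(component_mask);
  // block_mask now selects exactly the blocks covered by the velocities.
}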

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of the corresponding shape functions of course extends to all edges connected to this vertex, as well as to the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only on the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
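As a hedged usage sketch (the element is an illustrative assumption), the loop below queries the dimension of the geometric object each degree of freedom is logically associated with: 0 for a vertex, 1 for a line, 2 for a quad, 3 for a hex.

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

#include <iostream>

using namespace dealii;

int main()
{
  const FE_Q<3> fe(2); // Q2 in 3d: dofs sit on vertices, edges, faces, cell

  for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
    {
      const GeometryPrimitive primitive =
        fe.get_associated_geometry_primitive(i);
      std::cout << "DoF " << i << " is associated with an object of dimension "
                << primitive.get_dimension() << '\n';
    }
}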

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 2024-01-30 03:04:37.576757477 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 2024-01-30 03:04:37.576757477 +0000 @@ -466,7 +466,7 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

      Detailed Description

      template<int dim, int spacedim = dim>
class FE_SimplexDGP< dim, spacedim >

Implementation of a scalar discontinuous Lagrange finite element $P_k$, sometimes denoted as $P_{-k}$, that yields the finite element space of discontinuous, piecewise polynomials of degree $k$.

      Also see Simplex support.

      Definition at line 185 of file fe_simplex_p.h.
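A minimal usage sketch, under illustrative assumptions about mesh and polynomial degree: simplex elements such as this one are combined with a simplex mesh, a compatible mapping, and a simplex quadrature rule.

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_simplex_p.h>
#include <deal.II/fe/mapping_fe.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

using namespace dealii;

int main()
{
  // A triangle mesh of the unit square (illustrative).
  Triangulation<2> triangulation;
  GridGenerator::subdivided_hyper_cube_with_simplices(triangulation, 4);

  // The discontinuous simplex element of degree 2 documented here.
  const FE_SimplexDGP<2> fe(2);
  DoFHandler<2>          dof_handler(triangulation);
  dof_handler.distribute_dofs(fe);

  // Companion objects typically used with simplex elements.
  const MappingFE<2>     mapping(FE_SimplexP<2>(1));
  const QGaussSimplex<2> quadrature(fe.degree + 1);
}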

      @@ -1028,11 +1028,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
      @@ -2101,17 +2101,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
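To spell out the index gymnastics, here is a hedged, self-contained sketch (not the library's internal routine) that applies this correction to the Hessian of a single shape function:

#include <deal.II/base/tensor.h>

using namespace dealii;

// Apply  d^2 phi / (dx_j dx_k) = D_{jk} - H_{mjk} * dphi/dx_m
// for one shape function. D is the uncorrected Hessian, H the Jacobian
// pushed-forward gradient, grad the shape function's gradient.
template <int dim>
Tensor<2, dim> correct_hessian(const Tensor<2, dim> &D,
                               const Tensor<3, dim> &H,
                               const Tensor<1, dim> &grad)
{
  Tensor<2, dim> hessian = D;
  for (unsigned int j = 0; j < dim; ++j)
    for (unsigned int k = 0; k < dim; ++k)
      for (unsigned int m = 0; m < dim; ++m)
        hessian[j][k] -= H[m][j][k] * grad[m];
  return hessian;
}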

      @@ -2146,21 +2146,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m} - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m} - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.
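A matching hedged sketch for the third-derivative correction, under the same assumptions as the Hessian example above:

#include <deal.II/base/tensor.h>

using namespace dealii;

// Apply  d^3 phi / (dx_j dx_k dx_l) = D_{jkl} - H_{mjl} d^2phi/(dx_k dx_m)
//        - H_{mkl} d^2phi/(dx_j dx_m) - H_{mjk} d^2phi/(dx_l dx_m)
//        - K_{mjkl} dphi/dx_m
// for one shape function; hessian and grad are the already-corrected lower
// derivatives of the same shape function.
template <int dim>
Tensor<3, dim> correct_third_derivative(const Tensor<3, dim> &D,
                                        const Tensor<3, dim> &H,
                                        const Tensor<4, dim> &K,
                                        const Tensor<2, dim> &hessian,
                                        const Tensor<1, dim> &grad)
{
  Tensor<3, dim> third = D;
  for (unsigned int j = 0; j < dim; ++j)
    for (unsigned int k = 0; k < dim; ++k)
      for (unsigned int l = 0; l < dim; ++l)
        for (unsigned int m = 0; m < dim; ++m)
          third[j][k][l] -= H[m][j][l] * hessian[k][m] +
                            H[m][k][l] * hessian[j][m] +
                            H[m][j][k] * hessian[l][m] +
                            K[m][j][k][l] * grad[m];
  return third;
}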

      @@ -3463,7 +3463,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one component that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3571,7 +3571,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3850,9 +3850,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of the corresponding shape functions of course extends to all edges connected to this vertex, as well as to the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only on the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html 2024-01-30 03:04:37.712758611 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html 2024-01-30 03:04:37.712758611 +0000 @@ -466,7 +466,7 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

      Detailed Description

      template<int dim, int spacedim = dim>
class FE_SimplexP< dim, spacedim >

Implementation of a scalar Lagrange finite element $P_k$ that yields the finite element space of continuous, piecewise polynomials of degree $k$.

      Also see Simplex support.

      Definition at line 129 of file fe_simplex_p.h.

      @@ -1028,11 +1028,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
      @@ -2101,17 +2101,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2146,21 +2146,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m} - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m} - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3463,7 +3463,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one component that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3571,7 +3571,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3850,9 +3850,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of the corresponding shape functions of course extends to all edges connected to this vertex, as well as to the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only on the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 2024-01-30 03:04:37.852759777 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 2024-01-30 03:04:37.856759811 +0000 @@ -935,11 +935,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
      @@ -2008,17 +2008,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2053,21 +2053,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m} - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m} - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3461,7 +3461,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one component that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3569,7 +3569,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3848,9 +3848,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of the corresponding shape functions of course extends to all edges connected to this vertex, as well as to the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only on the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 2024-01-30 03:04:38.000761010 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 2024-01-30 03:04:38.004761044 +0000 @@ -875,11 +875,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
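The behavior documented above can be exercised directly through the deal.II API. A minimal, purely illustrative sketch (the element choice FE_SimplexP<2> and the sample function f(x,y) = x + 2y are assumptions for illustration, not taken from the package):

  // Evaluate f at the generalized support points, then apply the node
  // functionals Psi_i to obtain the nodal values Psi_i[f].
  #include <deal.II/fe/fe_simplex_p.h>
  #include <deal.II/lac/vector.h>

  #include <vector>

  int main()
  {
    const dealii::FE_SimplexP<2> fe(2); // illustrative choice of element

    // One value per generalized support point; each value is a vector with
    // as many entries as the element has vector components (here: one).
    std::vector<dealii::Vector<double>> support_point_values;
    for (const auto &p : fe.get_generalized_support_points())
      {
        dealii::Vector<double> value(1);
        value[0] = p[0] + 2. * p[1]; // sample function f(x,y) = x + 2y
        support_point_values.push_back(value);
      }

    // nodal_values[i] = Psi_i[f]: the expansion coefficients of the
    // interpolant f_h = sum_i Psi_i[f] phi_i.
    std::vector<double> nodal_values(fe.dofs_per_cell);
    fe.convert_generalized_support_point_values_to_dof_values(
      support_point_values, nodal_values);
  }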
      @@ -1948,17 +1948,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

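The correction above is a plain tensor contraction. A hypothetical sketch for a single shape function (the function name and argument layout are assumptions, not deal.II internals):

  #include <deal.II/base/tensor.h>

  // d2(phi)/dx_j dx_k = D_jk - H_mjk * d(phi)/dx_m
  template <int dim>
  dealii::Tensor<2, dim>
  corrected_hessian(const dealii::Tensor<2, dim> &D,        // uncorrected Hessian D_jk
                    const dealii::Tensor<3, dim> &H,        // pushed-forward gradient H_mjk
                    const dealii::Tensor<1, dim> &grad_phi) // d(phi)/dx_m
  {
    dealii::Tensor<2, dim> result = D;
    for (unsigned int j = 0; j < dim; ++j)
      for (unsigned int k = 0; k < dim; ++k)
        for (unsigned int m = 0; m < dim; ++m)
          result[j][k] -= H[m][j][k] * grad_phi[m];
    return result;
  }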
      @@ -1993,21 +1993,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

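The same contraction pattern applies to the third derivatives, with three Hessian terms and one gradient term. A hypothetical sketch (names and argument layout are assumptions):

  #include <deal.II/base/tensor.h>

  template <int dim>
  dealii::Tensor<3, dim>
  corrected_third_derivative(const dealii::Tensor<3, dim> &D,    // D_jkl
                             const dealii::Tensor<3, dim> &H,    // H_mjk
                             const dealii::Tensor<4, dim> &K,    // K_mjkl
                             const dealii::Tensor<2, dim> &hess, // d2(phi)/dx dx
                             const dealii::Tensor<1, dim> &grad) // d(phi)/dx
  {
    dealii::Tensor<3, dim> result = D;
    for (unsigned int j = 0; j < dim; ++j)
      for (unsigned int k = 0; k < dim; ++k)
        for (unsigned int l = 0; l < dim; ++l)
          for (unsigned int m = 0; m < dim; ++m)
            result[j][k][l] -= H[m][j][l] * hess[k][m]    // - H_mjl d2(phi)/dx_k dx_m
                               + H[m][k][l] * hess[j][m]  // - H_mkl d2(phi)/dx_j dx_m
                               + H[m][j][k] * hess[l][m]  // - H_mjk d2(phi)/dx_l dx_m
                               + K[m][j][k][l] * grad[m]; // - K_mjkl d(phi)/dx_m
    return result;
  }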
      @@ -3463,7 +3463,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
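For block masks, the Note above corresponds to the following usage pattern; a hypothetical sketch (the Stokes-like element is an assumption for illustration):

  #include <deal.II/fe/block_mask.h>
  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_system.h>
  #include <deal.II/fe/fe_values_extractors.h>

  int main()
  {
    // Illustrative vector-valued element: 2 velocity components + pressure.
    const dealii::FESystem<2> fe(dealii::FE_Q<2>(2), 2,
                                 dealii::FE_Q<2>(1), 1);

    const dealii::FEValuesExtractors::Scalar pressure(2); // component 2
    const dealii::BlockMask mask = fe.block_mask(pressure);
    // 'mask' has one entry per block; exactly the pressure block is true.
    // For an element such as FE_RaviartThomas, where one scalar component is
    // only part of a block, the same call would raise the exception above.
  }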
      @@ -3571,7 +3571,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
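The ComponentMask-to-BlockMask conversion described above, as a hypothetical sketch (same illustrative element as before):

  #include <deal.II/fe/block_mask.h>
  #include <deal.II/fe/component_mask.h>
  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_system.h>
  #include <deal.II/fe/fe_values_extractors.h>

  int main()
  {
    const dealii::FESystem<2> fe(dealii::FE_Q<2>(2), 2,
                                 dealii::FE_Q<2>(1), 1);

    // Select both velocity components (0 and 1), then ask which blocks
    // they correspond to.
    const dealii::ComponentMask velocity_components =
      fe.component_mask(dealii::FEValuesExtractors::Vector(0));
    const dealii::BlockMask velocity_blocks =
      fe.block_mask(velocity_components);
    // Succeeds because the selected components cover whole blocks; a mask
    // covering only part of a block would trigger the exception above.
  }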
      @@ -3850,9 +3850,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
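A hypothetical sketch of the geometry-primitive query just described (element choice assumed for illustration):

  #include <deal.II/base/geometry_info.h>
  #include <deal.II/fe/fe_q.h>

  int main()
  {
    const dealii::FE_Q<3> fe(2); // Q2 element in 3d
    for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
      {
        const dealii::GeometryPrimitive primitive =
          fe.get_associated_geometry_primitive(i);
        // Vertex DoFs report GeometryPrimitive::vertex, edge-midpoint DoFs
        // GeometryPrimitive::line, face-center DoFs GeometryPrimitive::quad,
        // and the cell-interior DoF GeometryPrimitive::hex.
        (void)primitive;
      }
  }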
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 2024-01-30 03:04:38.140762177 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 2024-01-30 03:04:38.144762210 +0000 @@ -3388,7 +3388,7 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3496,7 +3496,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3775,9 +3775,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 2024-01-30 03:04:38.280763343 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 2024-01-30 03:04:38.280763343 +0000 @@ -3441,7 +3441,7 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3543,7 +3543,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3800,9 +3800,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3835,11 +3835,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 2024-01-30 03:04:38.420764509 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 2024-01-30 03:04:38.420764509 +0000 @@ -700,11 +700,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
      @@ -1773,17 +1773,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1818,21 +1818,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3402,7 +3402,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3510,7 +3510,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3820,9 +3820,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 2024-01-30 03:04:38.560765676 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 2024-01-30 03:04:38.564765709 +0000 @@ -837,11 +837,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
      @@ -1910,17 +1910,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1955,21 +1955,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3418,7 +3418,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3526,7 +3526,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3836,9 +3836,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
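A minimal sketch (an illustration, not taken from the documentation) of querying this association for each degree of freedom; the variable names are assumptions:

FE_Q<3> fe(2); // Q_2 element in 3d
for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
  {
    // Vertex DoFs report GeometryPrimitive::vertex, edge-midpoint DoFs
    // GeometryPrimitive::line, face-center DoFs GeometryPrimitive::quad.
    const GeometryPrimitive primitive = fe.get_associated_geometry_primitive(i);
  }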

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html 2024-01-30 03:04:38.708766909 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html 2024-01-30 03:04:38.708766909 +0000 @@ -642,11 +642,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
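As an illustration (a sketch under assumed names, not from the documentation), one might fill the support-point values from a dealii::Function object f (assumed to have as many components as the element) and then ask the element for the corresponding DoF values:

const std::vector<Point<dim>> &points = fe.get_generalized_support_points();
std::vector<Vector<double>> support_point_values(
  points.size(), Vector<double>(fe.n_components()));
for (unsigned int q = 0; q < points.size(); ++q)
  f.vector_value(points[q], support_point_values[q]); // evaluate f at each point
std::vector<double> dof_values(fe.n_dofs_per_cell());
fe.convert_generalized_support_point_values_to_dof_values(support_point_values,
                                                          dof_values);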

      Parameters
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
      @@ -1715,17 +1715,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1760,21 +1760,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3406,7 +3406,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3514,7 +3514,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3824,9 +3824,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 2024-01-30 03:04:38.856768142 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 2024-01-30 03:04:38.860768175 +0000 @@ -484,8 +484,8 @@
      template<int dim, int spacedim = dim>
      class FiniteElement< dim, spacedim >

      This is the base class for finite elements in arbitrary dimensions. It declares the interface both in terms of member variables and public member functions through which properties of a concrete implementation of a finite element can be accessed. This interface generally consists of a number of groups of variables and functions that can roughly be delineated as follows:

      • Basic information about the finite element, such as the number of degrees of freedom per vertex, edge, or cell. This kind of data is stored in the FiniteElementData base class. (Though the FiniteElement::get_name() member function also falls into this category.)
• A description of the shape functions and their derivatives on the reference cell $[0,1]^d$, if an element is indeed defined by mapping shape functions from the reference cell to an actual cell.
• Matrices (and functions that access them) that describe how an element's shape functions relate to those on parent or child cells (restriction or prolongation) or neighboring cells (for hanging node constraints), as well as to other finite element spaces defined on the same cell (e.g., when doing $p$ refinement).
      • Functions that describe the properties of individual shape functions, for example which vector components of a vector-valued finite element's shape function is nonzero, or whether an element is primitive.
      • For elements that are interpolatory, such as the common $Q_p$ Lagrange elements, data that describes where their support points are located.
      • Functions that define the interface to the FEValues class that is almost always used to access finite element shape functions from user code.
@@ -569,8 +569,8 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
      21 1 0 8 1
What we see is the following: there are a total of 22 degrees-of-freedom on this element with components ranging from 0 to 2. Each DoF corresponds to one of the two base elements used to build FESystem: $\mathbb Q_2$ or $\mathbb Q_1$. Since FE_Q are primitive elements, we have a total of 9 distinct scalar-valued shape functions for the quadratic element and 4 for the linear element. Finally, for DoFs corresponding to the first base element multiplicity is either zero or one, meaning that we use the same scalar valued $\mathbb Q_2$ for both $x$ and $y$ components of the velocity field $\mathbb Q_2 \otimes \mathbb Q_2$. For DoFs corresponding to the second base element multiplicity is zero.

      Support points

      Finite elements are frequently defined by defining a polynomial space and a set of dual functionals. If these functionals involve point evaluations, then the element is "interpolatory" and it is possible to interpolate an arbitrary (but sufficiently smooth) function onto the finite element space by evaluating it at these points. We call these points "support points".

      Most finite elements are defined by mapping from the reference cell to a concrete cell. Consequently, the support points are then defined on the reference ("unit") cell, see this glossary entry. The support points on a concrete cell can then be computed by mapping the unit support points, using the Mapping class interface and derived classes, typically via the FEValues class.
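A small sketch (illustrative only) of retrieving the support points on the reference cell:

FE_Q<2> fe(2);
// One reference-cell location per shape function; evaluating a smooth
// function at these points yields its interpolant in the FE space.
const std::vector<Point<2>> &unit_points = fe.get_unit_support_points();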

      @@ -596,14 +596,14 @@

      Interpolation matrices in one dimension

      In one space dimension (i.e., for dim==1 and any value of spacedim), finite element classes implementing the interface of the current base class need only set the restriction and prolongation matrices that describe the interpolation of the finite element space on one cell to that of its parent cell, and to that on its children, respectively. The constructor of the current class in one dimension presets the interface_constraints matrix (used to describe hanging node constraints at the interface between cells of different refinement levels) to have size zero because there are no hanging nodes in 1d.

      Interpolation matrices in two dimensions

In addition to the fields discussed above for 1d, a constraint matrix is needed to describe hanging node constraints if the finite element has degrees of freedom located on edges or vertices. These constraints are represented by an $m\times n$-matrix interface_constraints, where $m$ is the number of degrees of freedom on the refined side without the corner vertices (those dofs on the middle vertex plus those on the two lines), and $n$ is that of the unrefined side (those dofs on the two vertices plus those on the line). The matrix is thus a rectangular one. The $m\times n$ size of the interface_constraints matrix can also be accessed through the interface_constraints_size() function.

The mapping of the dofs onto the indices of the matrix on the unrefined side is as follows: let $d_v$ be the number of dofs on a vertex, $d_l$ that on a line, then $n=0...d_v-1$ refers to the dofs on vertex zero of the unrefined line, $n=d_v...2d_v-1$ to those on vertex one, $n=2d_v...2d_v+d_l-1$ to those on the line.

Similarly, $m=0...d_v-1$ refers to the dofs on the middle vertex of the refined side (vertex one of child line zero, vertex zero of child line one), $m=d_v...d_v+d_l-1$ refers to the dofs on child line zero, $m=d_v+d_l...d_v+2d_l-1$ refers to the dofs on child line one. Please note that we do not need to reserve space for the dofs on the end vertices of the refined lines, since these must be mapped one-to-one to the appropriate dofs of the vertices of the unrefined line.

      Through this construction, the degrees of freedom on the child faces are constrained to the degrees of freedom on the parent face. The information so provided is typically consumed by the DoFTools::make_hanging_node_constraints() function.
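For illustration, a minimal sketch of the typical consumer of this information (the dof_handler variable is an assumed, already set-up DoFHandler):

AffineConstraints<double> constraints;
DoFTools::make_hanging_node_constraints(dof_handler, constraints);
constraints.close(); // finalize before use in assembly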

      Note
      The hanging node constraints described by these matrices are only relevant to the case where the same finite element space is used on neighboring (but differently refined) cells. The case that the finite element spaces on different sides of a face are different, i.e., the $hp$ case (see hp-finite element support) is handled by separate functions. See the FiniteElement::get_face_interpolation_matrix() and FiniteElement::get_subface_interpolation_matrix() functions.

      Interpolation matrices in three dimensions

For the interface constraints, the 3d case is similar to the 2d case. The numbering for the indices $n$ on the mother face is obvious and keeps to the usual numbering of degrees of freedom on quadrilaterals.

The numbering of the degrees of freedom on the interior of the refined faces for the index $m$ is as follows: let $d_v$ and $d_l$ be as above, and $d_q$ be the number of degrees of freedom per quadrilateral (and therefore per face), then $m=0...d_v-1$ denote the dofs on the vertex at the center, $m=d_v...5d_v-1$ for the dofs on the vertices at the center of the bounding lines of the quadrilateral, $m=5d_v...5d_v+4d_l-1$ are for the degrees of freedom on the four lines connecting the center vertex to the outer boundary of the mother face, $m=5d_v+4d_l...5d_v+12d_l-1$ for the degrees of freedom on the small lines surrounding the quad, and $m=5d_v+12d_l...5d_v+12d_l+4d_q-1$ for the dofs on the four child faces. Note the direction of the lines at the boundary of the quads, as shown below.

      The order of the twelve lines and the four child faces can be extracted from the following sketch, where the overall order of the different dof groups is depicted:

      *    *--15--4--16--*
       *    |      |      |
       *    10 19  6  20  12
      @@ -644,7 +644,7 @@
       
• Compute the basis $v_j$ of the finite element shape function space by applying $M^{-1}$ to the basis $w_j$.
The matrix $M$ may be computed with FETools::compute_node_matrix(). This function relies on the existence of generalized_support_points and FiniteElement::convert_generalized_support_point_values_to_dof_values() (see the glossary entry on generalized support points for more information). With this, one can then use the following piece of code in the constructor of a class derived from FiniteElement to compute the $M$ matrix:


// M as computed by FETools::compute_node_matrix(*this)
this->inverse_node_matrix.reinit(this->n_dofs_per_cell(),
                                 this->n_dofs_per_cell());
this->inverse_node_matrix.invert(M);
      @@ -661,7 +661,7 @@
As in this example, prolongation is almost always implemented via embedding, i.e., the nodal values of the function on the children may be different from the nodal values of the function on the parent cell, but as a function of $\mathbf x\in{\mathbb R}^\text{spacedim}$, the finite element field on the child is the same as on the parent.
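A short illustrative sketch (the names u_parent, child, and dim are assumptions) of applying the embedding to a vector of nodal values:

// u_parent holds nodal values on the parent cell; 'child' indexes a child.
const FullMatrix<double> &P =
  fe.get_prolongation_matrix(child, RefinementCase<dim>::isotropic_refinement);
Vector<double> u_child(fe.n_dofs_per_cell());
P.vmult(u_child, u_parent); // nodal values of the same function on the child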

      Computing restriction matrices

      The opposite operation, restricting a finite element function defined on the children to the parent cell is typically implemented by interpolating the finite element function on the children to the nodal values of the parent cell. In deal.II, the restriction operation is implemented as a loop over the children of a cell that each apply a matrix to the vector of unknowns on that child cell (these matrices are stored in restriction and are accessed by get_restriction_matrix()). The operation that then needs to be implemented turns out to be surprisingly difficult to describe, but is instructive to describe because it also defines the meaning of the restriction_is_additive_flags array (accessed via the restriction_is_additive() function).

      To give a concrete example, assume we use a $Q_1$ element in 1d, and that on each of the parent and child cells degrees of freedom are (locally and globally) numbered as follows:

      meshes: *-------* *---*---*
      @@ -669,42 +669,42 @@
      global DoF numbers: 0 1 0 1 2

Then we want the restriction operation to take the value of the zeroth DoF on child 0 as the value of the zeroth DoF on the parent, and take the value of the first DoF on child 1 as the value of the first DoF on the parent. Ideally, we would like to write this as follows

\[
U^\text{coarse}|_\text{parent}
= \sum_{\text{child}=0}^1 R_\text{child} U^\text{fine}|_\text{child}
\]

where $U^\text{fine}|_\text{child=0}=(U^\text{fine}_0,U^\text{fine}_1)^T$ and $U^\text{fine}|_\text{child=1}=(U^\text{fine}_1,U^\text{fine}_2)^T$. Writing the requested operation like this would here be possible by choosing

\[
R_0 = \left(\begin{matrix}1 & 0 \\ 0 & 0\end{matrix}\right),
\qquad\qquad
R_1 = \left(\begin{matrix}0 & 0 \\ 0 & 1\end{matrix}\right).
\]

However, this approach already fails if we go to a $Q_2$ element with the following degrees of freedom:

meshes:             *-------*        *----*----*
local DoF numbers:  0   2   1        0 2 1|0 2 1
global DoF numbers: 0   2   1        0 2 1 4  3

Writing things as the sum over matrix operations as above would not easily work because we have to add nonzero values to $U^\text{coarse}_2$ twice, once for each child.

Consequently, restriction is typically implemented as a concatenation operation. I.e., we first compute the individual restrictions from each child,

\[
\tilde U^\text{coarse}_\text{child}
= R_\text{child} U^\text{fine}|_\text{child},
\]

and then compute the values of $U^\text{coarse}|_\text{parent}$ with the following code:

for (unsigned int child=0; child<cell->n_children(); ++child)
  for (unsigned int i=0; i<dofs_per_cell; ++i)
    if (U_tilde_coarse[child][i] != 0)
      U_coarse_on_parent[i] = U_tilde_coarse[child][i];
In other words, each nonzero element of $\tilde U^\text{coarse}_\text{child}$ overwrites, rather than adds to, the corresponding element of $U^\text{coarse}|_\text{parent}$. This typically also implies that the restriction matrices from two different cells should agree on a value for coarse degrees of freedom that they both want to touch (otherwise the result would depend on the order in which we loop over children, which would be unreasonable because the order of children is an otherwise arbitrary convention). For example, in the example above, the restriction matrices will be

\[
R_0 = \left(\begin{matrix}1 & 0 & 0 \\ 0 & 0 & 0 \\ 0 & 1 & 0 \end{matrix}\right),
\qquad\qquad
R_1 = \left(\begin{matrix}0 & 0 & 0 \\ 0 & 1 & 0 \\ 1 & 0 & 0 \end{matrix}\right),
\]

and the compatibility condition is $R_{0,21}=R_{1,20}$, because both indicate that $U^\text{coarse}|_\text{parent,2}$ should be set to one times $U^\text{fine}|_\text{child=0,1}$ and $U^\text{fine}|_\text{child=1,0}$.

      Unfortunately, not all finite elements allow to write the restriction operation in this way. For example, for the piecewise constant FE_DGQ(0) element, the value of the finite element field on the parent cell can not be determined by interpolation from the children. Rather, the only reasonable choice is to take it as the average value between the children – so we are back to the sum operation, rather than the concatenation. Further thought shows that whether restriction should be additive or not is a property of the individual shape function, not of the finite element as a whole. Consequently, the FiniteElement::restriction_is_additive() function returns whether a particular shape function should act via concatenation (a return value of false) or via addition (return value of true), and the correct code for the overall operation is then as follows (and as, in fact, implemented in DoFAccessor::get_interpolated_dof_values()):

for (unsigned int child=0; child<cell->n_children(); ++child)
  for (unsigned int i=0; i<dofs_per_cell; ++i)
    if (fe.restriction_is_additive(i) == true)
      U_coarse_on_parent[i] += U_tilde_coarse[child][i];
    else
      if (U_tilde_coarse[child][i] != 0)
        U_coarse_on_parent[i] = U_tilde_coarse[child][i];
      @@ -2621,7 +2621,7 @@

Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -2705,7 +2705,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -2959,9 +2959,9 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -2996,11 +2996,11 @@
[in] cell_dof_index: The index of a shape function or degree of freedom. This index must be in the range [0,dofs_per_cell).
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 2024-01-30 03:04:38.884768375 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 2024-01-30 03:04:38.884768375 +0000 @@ -129,7 +129,7 @@
      template<typename T>
      class FiniteSizeHistory< T >

      A helper class to store a finite-size collection of objects of type T. If the number of elements exceeds the specified maximum size of the container, the oldest element is removed. Additionally, random access and removal of elements is implemented. Indexing is done relative to the last added element.

      In order to optimize the container for usage with memory-demanding objects (i.e. linear algebra vectors), the removal of an element does not free the memory. Instead the element is being kept in a separate cache so that subsequent addition does not require re-allocation of memory.

The primary usage of this class is in solvers to store a history of vectors. That is, if at the iteration $k$ we store $m$ vectors from previous iterations $\{k-1,k-2,...,k-m\}$, then addition of the new element will make the object contain elements from iterations $\{k,k-1,k-2,...,k-m+1\}$.
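An illustrative sketch (assumed usage, with m = 3) of how the rolling history behaves:

FiniteSizeHistory<Vector<double>> history(3); // keep at most 3 vectors
for (unsigned int k = 0; k < 5; ++k)
  {
    Vector<double> v(10);
    v = 1.0 * k;    // placeholder payload for iteration k
    history.add(v); // once full, adding drops the oldest element
  }
// Indexing is relative to the last added element:
// history[0] is from iteration 4, history[2] from iteration 2.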

      Definition at line 49 of file history.h.

      Constructor & Destructor Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 2024-01-30 03:04:38.928768742 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 2024-01-30 03:04:38.928768742 +0000 @@ -491,7 +491,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

      Note
If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, you can do so by overloading the project_to_manifold() function.
      Parameters
      @@ -500,7 +500,7 @@
Returns
A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

      Reimplemented from Manifold< dim, spacedim >.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 2024-01-30 03:04:38.980769175 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 2024-01-30 03:04:38.980769175 +0000 @@ -1081,8 +1081,8 @@
Return the $l_1$-norm of the matrix, where $||M||_1 = \max_j \sum_i |M_{ij}|$ (maximum of the sums over columns).

      @@ -1102,8 +1102,8 @@
Return the $l_\infty$-norm of the matrix, where $||M||_\infty = \max_i \sum_j |M_{ij}|$ (maximum of the sums over rows).

      @@ -2056,7 +2056,7 @@

A=Inverse(A). A must be a square matrix. Inversion of this matrix is performed by the Gauss-Jordan algorithm with partial pivoting. This process is well-behaved for positive definite matrices, but be aware of round-off errors in the indefinite case.

In case deal.II was configured with LAPACK, the functions Xgetrf and Xgetri build an LU factorization and invert the matrix upon that factorization, providing best performance up to matrices with a few hundred rows and columns.

The numerical effort to invert an $n \times n$ matrix is of the order $n^3$.
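
As a concrete illustration of the in-place inversion described above, here is a minimal sketch, assuming this is FullMatrix::gauss_jordan() (the matrix entries are made up for illustration):

#include <deal.II/lac/full_matrix.h>

void invert_in_place()
{
  dealii::FullMatrix<double> A(2, 2);
  A(0, 0) = 4.;  A(0, 1) = 1.;
  A(1, 0) = 1.;  A(1, 1) = 3.;  // symmetric positive definite
  A.gauss_jordan();             // A now holds its own inverse
}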

      @@ -2100,7 +2100,7 @@
Assign the Cholesky decomposition $A=:L L^T$ of the given matrix $A$ to *this, where $L$ is a lower triangular matrix. The given matrix must be symmetric positive definite.

      ExcMatrixNotPositiveDefinite will be thrown in the case that the matrix is not positive definite.

@@ -2124,7 +2124,7 @@

*this(i,j) = $V(i) W(j)$ where $V,W$ are vectors of the same length.
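
A minimal sketch combining the two operations above, the Cholesky factorization and the outer product (all entries made up for illustration):

#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/vector.h>

void factorize_and_outer_product()
{
  using namespace dealii;

  FullMatrix<double> A(2, 2), L(2, 2);
  A(0, 0) = 4.;  A(0, 1) = 2.;
  A(1, 0) = 2.;  A(1, 1) = 3.;  // symmetric positive definite
  L.cholesky(A);                // now A = L L^T with L lower triangular

  Vector<double> V(3), W(3);
  V(0) = 1.;  V(1) = 2.;  V(2) = 3.;
  W(0) = 1.;  W(1) = 0.;  W(2) = 2.;
  FullMatrix<double> M(3, 3);
  M.outer_product(V, W);        // M(i,j) = V(i) * W(j)
}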

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 2024-01-30 03:04:39.024769542 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 2024-01-30 03:04:39.024769542 +0000 @@ -245,14 +245,14 @@

      Functions that return tensors

      If the functions you are dealing with have a number of components that are a priori known (for example, dim elements), you might consider using the TensorFunction class instead. This is, in particular, true if the objects you return have the properties of a tensor, i.e., they are for example dim-dimensional vectors or dim-by-dim matrices. On the other hand, functions like VectorTools::interpolate or VectorTools::interpolate_boundary_values definitely only want objects of the current type. You can use the VectorFunctionFromTensorFunction class to convert the former to the latter.

      Functions that return vectors of other data types

Most of the time, your functions will have the form $f : \Omega \rightarrow {\mathbb R}^{n_\text{components}}$. However, there are occasions where you want the function to return vectors (or scalars) over a different number field, for example functions that return complex numbers or vectors of complex numbers: $f : \Omega \rightarrow {\mathbb C}^{n_\text{components}}$. In such cases, you can choose a value different than the default double for the second template argument of this class: it describes the scalar type to be used for each component of your return values. It defaults to double, but in the example above, it could be set to std::complex<double>. step-58 is an example of this.
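
A hedged sketch of the complex-valued case described above; the class name PlaneWave and the particular expression are made up for illustration:

#include <deal.II/base/function.h>
#include <deal.II/base/point.h>
#include <complex>

using namespace dealii;

// Hypothetical example: a scalar function with complex return values,
// in the spirit of the step-58 usage mentioned above.
template <int dim>
class PlaneWave : public Function<dim, std::complex<double>>
{
public:
  virtual std::complex<double>
  value(const Point<dim> &p, const unsigned int /*component*/ = 0) const override
  {
    return std::exp(std::complex<double>(0., 1.) * p[0]); // e^{i x}
  }
};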

      Template Parameters
dim : The space dimension of the range space within which the domain $\Omega$ of the function lies. Consequently, the function will be evaluated at objects of type Point<dim>.
RangeNumberType : The scalar type of the vector space that is the range (or image) of this function. As discussed above, objects of the current type represent functions from ${\mathbb R}^\text{dim}$ to $S^{n_\text{components}}$ where $S$ is the underlying scalar type of the vector space. The type of $S$ is given by the RangeNumberType template argument.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html 2024-01-30 03:04:39.072769942 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html 2024-01-30 03:04:39.072769942 +0000 @@ -349,27 +349,27 @@

      Names of difference formulas.

      Enumerator
      Euler 

      The symmetric Euler formula of second order:

\[
 u'(t) \approx \frac{u(t+h) - u(t-h)}{2h}.
\]

      UpwindEuler 

      The upwind Euler formula of first order:

\[
 u'(t) \approx \frac{u(t) - u(t-h)}{h}.
\]

      FourthOrder 

      The fourth order scheme

\[
 u'(t) \approx \frac{u(t-2h) - 8u(t-h) + 8u(t+h) - u(t+2h)}{12h}.
\]
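
Independent of the deal.II enumeration, the three formulas translate directly into difference quotients; a small generic sketch (the helper names are hypothetical):

#include <cmath>

// The three difference quotients above, applied to a generic callable u(t)
// with step size h.
template <typename F>
double symmetric_euler(const F &u, const double t, const double h)
{
  return (u(t + h) - u(t - h)) / (2. * h); // second order
}

template <typename F>
double upwind_euler(const F &u, const double t, const double h)
{
  return (u(t) - u(t - h)) / h; // first order
}

template <typename F>
double fourth_order(const F &u, const double t, const double h)
{
  return (u(t - 2. * h) - 8. * u(t - h) + 8. * u(t + h) - u(t + 2. * h)) /
         (12. * h); // fourth order
}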

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html 2024-01-30 03:04:39.124770375 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html 2024-01-30 03:04:39.124770375 +0000 @@ -558,7 +558,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the sub_manifold coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

This function is used in the computations required by the get_tangent_vector() function. The default implementation calls the get_gradient() method of the FunctionManifold::push_forward_function() member class. If you construct this object using the constructor that takes two string expressions, then the default implementation of this method uses a finite difference scheme to compute the gradients (see the AutoDerivativeFunction class for details), and you can specify the size of the spatial step size at construction time with the h parameter.

      Refer to the general documentation of this class for more information.

      @@ -735,7 +735,7 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
\\         &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                   -F^{-1}(\mathbf x_1)\right].
\end{align*}

In image space, the geodesic is then the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
\\             &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                   -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
         + t\left[F^{-1}(\mathbf x_2) - F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
\\              &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2) - F^{-1}(\mathbf x_1)\right].
\end{align*}
/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html	2024-01-30 03:04:39.180770841 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html	2024-01-30 03:04:39.180770841 +0000
@@ -510,27 +510,27 @@
Names of difference formulas.

Enumerator
Euler 

      The symmetric Euler formula of second order:

\[
 u'(t) \approx \frac{u(t+h) - u(t-h)}{2h}.
\]

      UpwindEuler 

      The upwind Euler formula of first order:

\[
 u'(t) \approx \frac{u(t) - u(t-h)}{h}.
\]

      FourthOrder 

      The fourth order scheme

\[
 u'(t) \approx \frac{u(t-2h) - 8u(t-h) + 8u(t+h) - u(t+2h)}{12h}.
\]

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 2024-01-30 03:04:39.220771175 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 2024-01-30 03:04:39.220771175 +0000 @@ -217,7 +217,7 @@

      Detailed Description

      template<int dim>
class Functions::CoordinateRestriction< dim >

This class takes a function in dim + 1 dimensions and creates a new function in one dimension lower by restricting one of the coordinates to a given value. Mathematically this corresponds to taking a function $f = f(x, y, z)$, a fixed value, $Z$, and defining a new function (the restriction) $g = g(x, y) = f(x, y, Z)$. Using this class, this translates to

double z = ...
unsigned int restricted_direction = 2;
// plausible completion (assumed constructor signature); `function` is a
// Function<3> whose z-coordinate is fixed to the value z:
Functions::CoordinateRestriction<2> restriction(function, restricted_direction, z);
@@ -225,7 +225,7 @@

The dim-dimensional coordinates on the restriction are ordered starting from the restricted (dim + 1)-coordinate. In particular, this means that if the $y$-coordinate is locked to $Y$ in 3d, the coordinates are ordered as $(z, x)$ on the restriction: $g = g(z, x) = f(x, Y, z)$. This is the same convention as in BoundingBox::cross_section.

      Definition at line 51 of file function_restriction.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CosineFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CosineFunction.html 2024-01-30 03:04:39.260771508 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CosineFunction.html 2024-01-30 03:04:39.260771508 +0000 @@ -216,7 +216,7 @@

      Detailed Description

      template<int dim>
class Functions::CosineFunction< dim >

Cosine-shaped pillow function. This is another function with zero boundary values on $[-1,1]^d$. In the interior it is the product of $\cos(\pi/2 x_i)$.

      Definition at line 219 of file function_lib.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionC1.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionC1.html 2024-01-30 03:04:39.308771908 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionC1.html 2024-01-30 03:04:39.308771908 +0000 @@ -240,8 +240,8 @@

      Detailed Description

      template<int dim>
class Functions::CutOffFunctionC1< dim >

A cut-off function for an arbitrarily-sized ball that is in the space $C^1$ (i.e., continuously differentiable). This is a cut-off function that is often used in the literature of the Immersed Boundary Method.

The expression of the function in radial coordinates is given by $f(r)=\frac{1}{2}(\cos(\pi r/s)+1)$ where $r<s$ is the distance to the center, and $s$ is the radius of the sphere. If vector valued, it can be restricted to a single component.

      Definition at line 1206 of file function_lib.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionCinfty.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionCinfty.html 2024-01-30 03:04:39.356772308 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionCinfty.html 2024-01-30 03:04:39.356772308 +0000 @@ -240,7 +240,7 @@

      Detailed Description

      template<int dim>
class Functions::CutOffFunctionCinfty< dim >

Cut-off function for an arbitrary ball. This is the traditional cut-off function in $C^\infty$ for a ball of certain radius around center, $f(r)=\exp\left(1-\frac{1}{1-r^2/s^2}\right)$, where $r$ is the distance to the center, and $s$ is the radius of the sphere. If vector valued, it can be restricted to a single component.

      Definition at line 1259 of file function_lib.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionLinfty.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionLinfty.html 2024-01-30 03:04:39.400772674 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CutOffFunctionLinfty.html 2024-01-30 03:04:39.404772708 +0000 @@ -241,7 +241,7 @@

      Detailed Description

      template<int dim>
      class Functions::CutOffFunctionLinfty< dim >

      Cut-off function in L-infinity for an arbitrary ball. This function is the characteristic function of a ball around center with a specified radius, that is,

\[ f = \chi(B_r(c)). \]

      If vector valued, it can be restricted to a single component.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierCosineFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierCosineFunction.html 2024-01-30 03:04:39.440773007 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierCosineFunction.html 2024-01-30 03:04:39.440773007 +0000 @@ -213,8 +213,8 @@

      Detailed Description

      template<int dim>
class Functions::FourierCosineFunction< dim >

Given a wavenumber vector generate a cosine function. The wavenumber coefficient is given as a $d$-dimensional point $k$ in Fourier space, and the function is then recovered as $f(x) = \cos(\sum_i k_i x_i) = \mathrm{Re}(\exp(i k\cdot x))$.

      The class has its name from the fact that it resembles one component of a Fourier cosine decomposition.

      Definition at line 712 of file function_lib.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierCosineSum.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierCosineSum.html 2024-01-30 03:04:39.480773340 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierCosineSum.html 2024-01-30 03:04:39.480773340 +0000 @@ -215,8 +215,8 @@

      Detailed Description

      template<int dim>
class Functions::FourierCosineSum< dim >

Given a sequence of wavenumber vectors and weights generate a sum of cosine functions. Each wavenumber coefficient is given as a $d$-dimensional point $k$ in Fourier space, and the entire function is then recovered as $f(x) = \sum_j w_j \cos(\sum_i k_i x_i) = \mathrm{Re}(\sum_j w_j \exp(i k\cdot x))$.

      Definition at line 870 of file function_lib.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierSineFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierSineFunction.html 2024-01-30 03:04:39.516773640 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierSineFunction.html 2024-01-30 03:04:39.516773640 +0000 @@ -213,8 +213,8 @@

      Detailed Description

      template<int dim>
class Functions::FourierSineFunction< dim >

Given a wavenumber vector generate a sine function. The wavenumber coefficient is given as a $d$-dimensional point $k$ in Fourier space, and the function is then recovered as $f(x) = \sin(\sum_i k_i x_i) = \mathrm{Im}(\exp(i k\cdot x))$.

      The class has its name from the fact that it resembles one component of a Fourier sine decomposition.

      Definition at line 766 of file function_lib.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierSineSum.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierSineSum.html 2024-01-30 03:04:39.556773974 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1FourierSineSum.html 2024-01-30 03:04:39.556773974 +0000 @@ -215,7 +215,7 @@

      Detailed Description

      template<int dim>
class Functions::FourierSineSum< dim >

Given a sequence of wavenumber vectors and weights generate a sum of sine functions. Each wavenumber coefficient is given as a $d$-dimensional point $k$ in Fourier space, and the entire function is then recovered as $f(x) = \sum_j w_j \sin(\sum_i k_i x_i) = \mathrm{Im}(\sum_j w_j \exp(i k\cdot x))$.

      Definition at line 816 of file function_lib.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 2024-01-30 03:04:39.596774307 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 2024-01-30 03:04:39.596774307 +0000 @@ -228,11 +228,11 @@

      Detailed Description

      template<int dim>
class Functions::InterpolatedTensorProductGridData< dim >

A scalar function that computes its values by (bi-, tri-)linear interpolation from a set of point data that are arranged on a possibly non-uniform tensor product mesh. In other words, considering the three-dimensional case, let there be points $x_0,\ldots, x_{K-1}$, $y_0,\ldots,y_{L-1}$, $z_0,\ldots,z_{M-1}$, and data $d_{klm}$ defined at point $(x_k,y_l,z_m)^T$; then evaluating the function at a point $\mathbf x=(x,y,z)$ will find the box so that $x_k\le x\le x_{k+1}$, $y_l\le y\le y_{l+1}$, $z_m\le z\le z_{m+1}$, and do a trilinear interpolation of the data on this cell. Similar operations are done in lower dimensions.

      This class is most often used for either evaluating coefficients or right hand sides that are provided experimentally at a number of points inside the domain, or for comparing outputs of a solution on a finite element mesh against previously obtained data defined on a grid.

Note
If the points $x_i$ are actually equally spaced on an interval $[x_0,x_1]$ and the same is true for the other data points in higher dimensions, you should use the InterpolatedUniformGridData class instead.

      If a point is requested outside the box defined by the end points of the coordinate arrays, then the function is assumed to simply extend by constant values beyond the last data point in each coordinate direction. (The class does not throw an error if a point lies outside the box since it frequently happens that a point lies just outside the box by an amount on the order of numerical roundoff.)

      Note
      The use of the related class InterpolatedUniformGridData is discussed in step-53.

      Dealing with large data sets

      @@ -374,7 +374,7 @@

      Constructor to initialize this class instance with the data given in data_values.

      Parameters
coordinate_values : An array of dim arrays. Each of the inner arrays contains the coordinate values $x_0,\ldots, x_{K-1}$ and similarly for the other coordinate directions. These arrays need not have the same size. Obviously, we need dim such arrays for a dim-dimensional function object. The coordinate values within this array are assumed to be strictly ascending to allow for efficient lookup.
data_values : A dim-dimensional table of data at each of the mesh points defined by the coordinate arrays above. The data passed in is copied into internal data structures. Note that the Table class has a number of conversion constructors that allow converting other data types into a table where you specify this argument.
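
A minimal sketch of constructing and evaluating such an interpolation, assuming the constructor taking a std::array of coordinate vectors and a Table of data values (all values made up for illustration):

#include <deal.II/base/function_lib.h>
#include <deal.II/base/point.h>
#include <deal.II/base/table.h>

#include <array>
#include <vector>

void interpolation_example()
{
  using namespace dealii;

  // Strictly ascending, possibly non-uniform coordinate values.
  const std::array<std::vector<double>, 2> coordinates{
    {{0.0, 0.5, 2.0},          // x_k
     {0.0, 1.0, 2.0, 3.0}}};   // y_l

  Table<2, double> data(3, 4); // d_{kl} at the mesh points
  for (unsigned int k = 0; k < 3; ++k)
    for (unsigned int l = 0; l < 4; ++l)
      data(k, l) = k + 0.1 * l;

  const Functions::InterpolatedTensorProductGridData<2> f(coordinates, data);
  const double v = f.value(Point<2>(0.3, 1.7)); // bilinear interpolation
  (void)v;
}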
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 2024-01-30 03:04:39.640774674 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 2024-01-30 03:04:39.640774674 +0000 @@ -221,10 +221,10 @@

      Detailed Description

      template<int dim>
class Functions::InterpolatedUniformGridData< dim >

A scalar function that computes its values by (bi-, tri-)linear interpolation from a set of point data that are arranged on a uniformly spaced tensor product mesh. In other words, considering the three-dimensional case, let there be points $x_0,\ldots, x_{K-1}$ that result from a uniform subdivision of the interval $[x_0,x_{K-1}]$ into $K-1$ sub-intervals of size $\Delta x = (x_{K-1}-x_0)/(K-1)$, and similarly $y_0,\ldots,y_{L-1}$, $z_0,\ldots,z_{M-1}$. Also consider data $d_{klm}$ defined at point $(x_k,y_l,z_m)^T$; then evaluating the function at a point $\mathbf x=(x,y,z)$ will find the box so that $x_k\le x\le x_{k+1}$, $y_l\le y\le y_{l+1}$, $z_m\le z\le z_{m+1}$, and do a trilinear interpolation of the data on this cell. Similar operations are done in lower dimensions.

      This class is most often used for either evaluating coefficients or right hand sides that are provided experimentally at a number of points inside the domain, or for comparing outputs of a solution on a finite element mesh against previously obtained data defined on a grid.

Note
If you have a problem where the points $x_i$ are not equally spaced (e.g., they result from a computation on a graded mesh that is denser closer to one boundary), then use the InterpolatedTensorProductGridData class instead.

      If a point is requested outside the box defined by the end points of the coordinate arrays, then the function is assumed to simply extend by constant values beyond the last data point in each coordinate direction. (The class does not throw an error if a point lies outside the box since it frequently happens that a point lies just outside the box by an amount on the order of numerical roundoff.)

      Note
      The use of this class is discussed in step-53.

      Dealing with large data sets

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 2024-01-30 03:04:39.676774973 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 2024-01-30 03:04:39.676774973 +0000 @@ -217,14 +217,14 @@

      Detailed Description

      A function that solves the Laplace equation (with specific boundary values but zero right hand side) and that has a singularity at the center of the L-shaped domain in 2d (i.e., at the location of the re-entrant corner of this non-convex domain).

The function is given in polar coordinates by $r^{\frac{2}{3}} \sin(\frac{2}{3} \phi)$ with a singularity at the origin and should be used with GridGenerator::hyper_L(). Here, $\phi$ is defined as the clockwise angle against the positive $x$-axis.

      This function is often used to illustrate that the solutions of the Laplace equation

\[
  -\Delta u = 0
\]

can be singular even if the boundary values are smooth. (Here, if the domain is the L-shaped domain $(-1,1)^2 \backslash [0,1]^2$, the boundary values for $u$ are zero on the two line segments adjacent to the origin, and equal to $r^{\frac{2}{3}} \sin(\frac{2}{3} \phi)$ on the remaining parts of the boundary.) The function itself remains bounded on the domain, but its gradient is of the form $r^{-1/3}$ in the vicinity of the origin and consequently diverges as one approaches the origin.

      Definition at line 410 of file function_lib.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Monomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Monomial.html 2024-01-30 03:04:39.716775307 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Monomial.html 2024-01-30 03:04:39.716775307 +0000 @@ -215,7 +215,7 @@

      Detailed Description

      template<int dim, typename Number = double>
class Functions::Monomial< dim, Number >

A class that represents a function object for a monomial. Monomials are polynomials with only a single term, i.e. in 1-d they have the form $x^\alpha$, in 2-d the form $x_1^{\alpha_1}x_2^{\alpha_2}$, and in 3-d $x_1^{\alpha_1}x_2^{\alpha_2}x_3^{\alpha_3}$. Monomials are therefore described by a $dim$-tuple of exponents. Consequently, the class's constructor takes a Tensor<1,dim> to describe the set of exponents. Most of the time these exponents will of course be integers, but real exponents are equally valid; note that exponents can't be real when the bases are negative numbers.

      Definition at line 1320 of file function_lib.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html 2024-01-30 03:04:39.764775706 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html 2024-01-30 03:04:39.768775740 +0000 @@ -381,27 +381,27 @@

      Names of difference formulas.

      Enumerator
      Euler 

      The symmetric Euler formula of second order:

\[
 u'(t) \approx \frac{u(t+h) - u(t-h)}{2h}.
\]

      UpwindEuler 

      The upwind Euler formula of first order:

\[
 u'(t) \approx \frac{u(t) - u(t-h)}{h}.
\]

      FourthOrder 

      The fourth order scheme

\[
 u'(t) \approx \frac{u(t-2h) - 8u(t-h) + 8u(t+h) - u(t+2h)}{12h}.
\]

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PillowFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PillowFunction.html 2024-01-30 03:04:39.808776073 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PillowFunction.html 2024-01-30 03:04:39.808776073 +0000 @@ -217,7 +217,7 @@

      Detailed Description

      template<int dim>
      class Functions::PillowFunction< dim >

      d-quadratic pillow on the unit hypercube.

This is a function for testing the implementation. It has zero Dirichlet boundary values on the domain $(-1,1)^d$. In the inside, it is the product of $1-x_i^2$ over all space dimensions.

      Providing a non-zero argument to the constructor, the whole function can be offset by a constant.

      Together with the function, its derivatives and Laplacian are defined.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html 2024-01-30 03:04:39.844776373 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html 2024-01-30 03:04:39.844776373 +0000 @@ -220,7 +220,7 @@

      Detailed Description

      template<int dim>
class Functions::PointRestriction< dim >

This class creates a 1-dimensional function from a dim + 1 dimensional function by restricting dim of the coordinate values to a given point. Mathematically this corresponds to taking a function, $f = f(x, y, z)$, and a point $(Y, Z)$, and defining a new function $g = g(x) = f(x, Y, Z)$. Using this class, this translates to

Point<2> point(y, z);
unsigned int open_direction = 0;
// plausible completion (assumed constructor signature); `function` is a
// Function<3>, and the x-direction (index 0) is kept open:
Functions::PointRestriction<1> restriction(function, open_direction, point);
@@ -229,7 +229,7 @@

The coordinates of the point will be expanded in the higher-dimensional functions coordinates starting from the open-direction (and wrapping around). In particular, if we restrict to a point $(Z, X)$ and choose to keep the y-direction open, the restriction that is created is the function $g(y) = f(X, y, Z)$. This is consistent with the convention in BoundingBox::cross_section.

      Definition at line 110 of file function_restriction.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 2024-01-30 03:04:39.884776707 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 2024-01-30 03:04:39.884776707 +0000 @@ -216,8 +216,8 @@

      Detailed Description

      template<int dim>
class Functions::Polynomial< dim >

A class that represents a function object for a polynomial. A polynomial is composed of a sum of monomials. If the polynomial has n monomials and the dimension is equal to dim, the polynomial can be written as $\sum_{i=1}^{n} a_{i}(\prod_{d=1}^{dim} x_{d}^{\alpha_{i,d}})$, where $a_{i}$ are the coefficients of the monomials and $\alpha_{i,d}$ are their exponents. The class's constructor takes a Table<2,double> to describe the set of exponents and a Vector<double> to describe the set of coefficients.

      Definition at line 1700 of file function_lib.h.

      Member Typedef Documentation

@@ -321,8 +321,8 @@

Constructor. The coefficients and the exponents of the polynomial are passed as arguments. The Table<2, double> exponents has a number of rows equal to the number of monomials of the polynomial and a number of columns equal to dim. The i-th row of the exponents table contains the ${\alpha_{i,d}}$ exponents of the i-th monomial $a_{i}\prod_{d=1}^{dim} x_{d}^{\alpha_{i,d}}$. The i-th element of the coefficients vector contains the coefficient $a_{i}$ for the i-th monomial.

      Definition at line 2838 of file function_lib.cc.
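
A minimal sketch of this constructor, building the two-monomial polynomial $2x^2y + 3y^3$ (values made up for illustration):

#include <deal.II/base/function_lib.h>
#include <deal.II/base/point.h>
#include <deal.II/base/table.h>

#include <vector>

void polynomial_example()
{
  using namespace dealii;

  Table<2, double> exponents(2, 2); // one row per monomial
  exponents(0, 0) = 2.;  exponents(0, 1) = 1.;  // x^2 y
  exponents(1, 0) = 0.;  exponents(1, 1) = 3.;  // y^3

  const std::vector<double> coefficients = {2., 3.};

  const Functions::Polynomial<2> p(exponents, coefficients);
  const double v = p.value(Point<2>(1., 2.)); // 2*(1*1*2) + 3*(2^3) = 28
  (void)v;
}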

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html 2024-01-30 03:04:39.928777073 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html 2024-01-30 03:04:39.928777073 +0000 @@ -227,11 +227,11 @@
      template<int dim>
      class Functions::SignedDistance::Ellipsoid< dim >

      Signed-distance level set function to an ellipsoid defined by:

\[
 \sum_{i=1}^{dim} \frac{(x_i - c_i)^2}{R_i^2} = 1
\]

Here, $c_i$ are the coordinates of the center of the ellipsoid and $R_i$ are the elliptic radii. This function is zero on the ellipsoid, negative inside the ellipsoid and positive outside the ellipsoid.

      Definition at line 145 of file function_signed_distance.h.

      Member Typedef Documentation

      @@ -451,9 +451,9 @@

      Evaluates the ellipsoid function:

\[
 f(\vec{x}) = \sum_{i=1}^{dim} \frac{(x_i - c_i)^2}{R_i^2} - 1
\]

      Definition at line 201 of file function_signed_distance.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 2024-01-30 03:04:39.964777373 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 2024-01-30 03:04:39.964777373 +0000 @@ -215,7 +215,7 @@

      Detailed Description

      template<int dim>
class Functions::SignedDistance::Plane< dim >

Signed level set function of a plane in $\mathbb{R}^{dim}$: $\psi(x) = n \cdot (x - x_p)$. Here, $n$ is the plane normal and $x_p$ is a point in the plane. Thus, with respect to the direction of the normal, this function is positive above the plane, zero in the plane, and negative below the plane. If the normal is normalized, $\psi$ will be the signed distance to the closest point in the plane.

      Definition at line 105 of file function_signed_distance.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 2024-01-30 03:04:40.004777706 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 2024-01-30 03:04:40.008777739 +0000 @@ -215,7 +215,7 @@

      Detailed Description

      template<int dim>
      class Functions::SignedDistance::Rectangle< dim >

      Signed-distance level set function of a rectangle.

This function is zero on the rectangle, negative "inside" and positive in the rest of $\mathbb{R}^{dim}$.

      Contour surfaces of the signed distance function of a 3D rectangle are illustrated below:

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 2024-01-30 03:04:40.044778040 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 2024-01-30 03:04:40.044778040 +0000 @@ -215,9 +215,9 @@

      Detailed Description

      template<int dim>
class Functions::SignedDistance::Sphere< dim >

Signed-distance level set function of a sphere: $\psi(x) = \| x - x^c \| - R$. Here, $x^c$ is the center of the sphere and $R$ is its radius. This function is thus zero on the sphere, negative "inside" the ball having the sphere as its boundary, and positive in the rest of $\mathbb{R}^{dim}$.

This function has gradient and Hessian equal to $\partial_i \psi(x) = (x - x^c)/\| x - x^c \|$, $\partial_i \partial_j \psi = \delta_{ij}/\| x - x^c \| - (x_i - x_i^c)(x_j - x_j^c)/\| x - x^c \|^3$, where $\delta_{ij}$ is the Kronecker delta function.

      Definition at line 49 of file function_signed_distance.h.
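
A minimal usage sketch, assuming the constructor takes the center and the radius (values made up for illustration):

#include <deal.II/base/function_signed_distance.h>
#include <deal.II/base/point.h>

void sphere_example()
{
  using namespace dealii;

  const Functions::SignedDistance::Sphere<2> sphere(Point<2>(0., 0.), 1.);
  const double d = sphere.value(Point<2>(2., 0.)); // +1: outside the circle
  (void)d;
}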

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html 2024-01-30 03:04:40.084778373 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html 2024-01-30 03:04:40.084778373 +0000 @@ -216,8 +216,8 @@

      Detailed Description

      template<int dim>
      class Functions::SignedDistance::ZalesakDisk< dim >

      Signed-distance level set function of Zalesak's disk proposed in [zalesak1979fully].

It is calculated by the set difference $\psi(x) = \max(\psi_{S}(x), -\psi_{N}(x))$ of the level set functions of a sphere $\psi_{S}$ and a rectangle $\psi_{N}$. This function is zero on the surface of the disk, negative "inside" and positive in the rest of $\mathbb{R}^{dim}$.

      Contour surfaces of the signed distance function of a 3D Zalesak's disk are illustrated below:

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Spherical.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Spherical.html 2024-01-30 03:04:40.128778739 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Spherical.html 2024-01-30 03:04:40.128778739 +0000 @@ -218,7 +218,7 @@

      Detailed Description

      template<int dim>
class Functions::Spherical< dim >

An abstract base class for a scalar-valued function $f=f(r,\theta,\phi)$ defined in spherical coordinates. This class wraps transformation of values, gradients and hessians from spherical coordinates to the Cartesian coordinate system used by the Function base class. Therefore derived classes only need to implement those functions in spherical coordinates (specifically svalue(), sgradient() and shessian()). The convention for angles is the same as in GeometricUtilities::Coordinates.

      Note
      This function is currently only implemented for dim==3 .

      Definition at line 44 of file function_spherical.h.

      @@ -517,7 +517,7 @@

      Return the gradient in spherical coordinates.

The returned object should contain derivatives in the following order: $\{ f_{,r},\, f_{,\theta},\, f_{,\phi}\}$.

      Definition at line 330 of file function_spherical.cc.

      @@ -548,8 +548,8 @@

      Return the Hessian in spherical coordinates.

The returned object should contain derivatives in the following order: $\{ f_{,rr},\, f_{,\theta\theta},\, f_{,\phi\phi},\, f_{,r\theta},\, f_{,r\phi},\, f_{,\theta\phi}\}$.

      Definition at line 341 of file function_spherical.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html 2024-01-30 03:04:40.176779139 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html 2024-01-30 03:04:40.176779139 +0000 @@ -264,7 +264,7 @@

      Detailed Description

      A singular solution to Stokes' equations on a 2d L-shaped domain.

This function satisfies $-\triangle \mathbf{u} + \nabla p = 0$ and represents a typical singular solution around a reentrant corner of an L-shaped domain that can be created using GridGenerator::hyper_L(). The velocity vanishes on the two faces of the re-entrant corner and $\nabla\mathbf{u}$ and $p$ are singular at the origin while they are smooth in the rest of the domain because they can be written as a product of a smooth function and the term $r^{\lambda-1}$ where $r$ is the radius and $\lambda \approx 0.54448$ is a fixed parameter.

Taken from Houston, Schötzau, Wihler, proceedings of ENUMATH 2003.

      Definition at line 246 of file flow_function.h.

      @@ -1738,7 +1738,7 @@
The exponent of the radius, computed as the solution to $\sin(\lambda\omega)+\lambda \sin(\omega)=0$.

      Definition at line 283 of file flow_function.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 2024-01-30 03:04:40.216779472 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 2024-01-30 03:04:40.216779472 +0000 @@ -911,7 +911,7 @@
    Returns
    This function returns a struct containing some extra data stored by the ExodusII file that cannot be loaded into a Triangulation - see ExodusIIData for more information.
A cell face in ExodusII can be in an arbitrary number of sidesets (i.e., it can have an arbitrary number of sideset ids) - however, a boundary cell face in deal.II has exactly one boundary id. All boundary faces that are not in a sideset are given the (default) boundary id of $0$. This function then groups sidesets together into unique sets and gives each one a boundary id. For example: Consider a single-quadrilateral mesh whose left side has no sideset id, right side has sideset ids $0$ and $1$, and whose bottom and top sides have sideset ids of $0$. The left face will have a boundary id of $0$, the top and bottom faces boundary ids of $1$, and the right face a boundary id of $2$. Hence the vector returned by this function in that case will be $\{\{\}, \{0\}, \{0, 1\}\}$.

    Definition at line 3772 of file grid_in.cc.
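
A minimal usage sketch, assuming read_exodusii() returns the ExodusIIData struct with the id_to_sideset_ids vector described above ("mesh.e" is a placeholder file name):

#include <deal.II/grid/grid_in.h>
#include <deal.II/grid/tria.h>

void read_exodus_mesh()
{
  using namespace dealii;

  Triangulation<3> triangulation;
  GridIn<3>        grid_in(triangulation);

  const auto exodus_data = grid_in.read_exodusii("mesh.e");
  // exodus_data.id_to_sideset_ids[b] lists the sideset ids that were
  // grouped into the deal.II boundary id b.
}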

    /usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html 2024-01-30 03:04:40.240779673 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html 2024-01-30 03:04:40.240779673 +0000 @@ -137,10 +137,10 @@

    Detailed Description

    template<typename number>
    class Householder< number >

    QR-decomposition of a full matrix.

This class computes the QR-decomposition of a given matrix by the Householder algorithm. Then, the function least_squares() can be used to compute the vector $x$ minimizing $\|Ax-b\|$ for a given vector $b$. The QR decomposition of $A$ is useful for this purpose because the minimizer is given by the equation $x=(A^TA)^{-1}A^Tb=(R^TQ^TQR)^{-1}R^TQ^Tb$ which is easy to compute because $Q$ is an orthogonal matrix, and consequently $Q^TQ=I$. Thus, $x=(R^TR)^{-1}R^TQ^Tb=R^{-1}R^{-T}R^TQ^Tb=R^{-1}Q^Tb$. Furthermore, $R$ is triangular, so applying $R^{-1}$ to a vector only involves a backward or forward solve.

    Implementation details

The class does not in fact store the $Q$ and $R$ factors explicitly as matrices. It does store $R$, but the $Q$ factor is stored as the product of Householder reflections of the form $Q_i = I-v_i v_i^T$, where the vectors $v_i$ are chosen so that they can be stored in the lower-triangular part of an underlying matrix object, whereas $R$ is stored in the upper triangular part.

The $v_i$ vectors and the $R$ matrix are now in conflict because they both want to use the diagonal entry of the matrix, but of course we can only store one of them in these positions. Consequently, the entries $(v_i)_i$ are stored separately in the diagonal member variable.

    Note
    Instantiations for this template are provided for <float> and <double>; others can be generated in application programs (see the section on Template instantiations in the manual).

    Definition at line 80 of file householder.h.
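
A minimal usage sketch (assuming a FullMatrix<double> A and a Vector<double> b have already been set up; least_squares() returns the norm of the residual):

Householder<double> householder(A);
Vector<double>      x(A.n());
const double residual_norm = householder.least_squares(x, b);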

    /usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 2024-01-30 03:04:40.256779806 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 2024-01-30 03:04:40.256779806 +0000 @@ -134,13 +134,13 @@  

    Detailed Description

Implementation of a simple class representing the identity matrix of a given size, i.e. a matrix with entries $A_{ij}=\delta_{ij}$. While it has the most important ingredients of a matrix, in particular that one can ask for its size and perform matrix-vector products with it, a matrix of this type is really only useful in two contexts: preconditioning and initializing other matrices.

    Initialization

The main usefulness of this class lies in its ability to initialize other matrices, like this:
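(The code snippet itself was lost in extraction; the following is a plausible reconstruction of what it showed. IdentityMatrix and the FullMatrix conversion constructor are real deal.II interfaces, but the exact original lines are an assumption.)

IdentityMatrix     identity(10);
FullMatrix<double> full_matrix(identity); // a 10x10 matrix, 1s on the diagonal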
This creates a $10\times 10$ matrix with ones on the diagonal and zeros everywhere else. Most matrix types, in particular FullMatrix and SparseMatrix, have conversion constructors and assignment operators for IdentityMatrix, and can therefore be filled rather easily with identity matrices.

    Preconditioning

No preconditioning at all is equivalent to preconditioning with the identity matrix. deal.II has a specialized class for this purpose, PreconditionIdentity, that can be used in a context as shown in the documentation of that class. The present class can be used in much the same way, although without any additional benefit:

    SolverControl solver_control (1000, 1e-12);
    SolverCG<> cg (solver_control);
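(The snippet is cut off here by the diff; a plausible completion, under the assumption that system_matrix, solution, and system_rhs exist, is the standard SolverCG calling pattern:)

cg.solve(system_matrix, solution, system_rhs, PreconditionIdentity());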
    /usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html 2024-01-30 03:04:40.280780006 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html 2024-01-30 03:04:40.280780006 +0000 @@ -168,7 +168,7 @@

    Detailed Description

    template<typename VectorType>
class ImplicitQR< VectorType >

A class to obtain the triangular $R$ matrix of the $A=QR$ factorization together with the matrix $A$ itself. The orthonormal matrix $Q$ is not stored explicitly; hence the name of the class. The multiplication with $Q$ can be represented as $Q=A R^{-1}$, whereas the multiplication with $Q^T$ is given by $Q^T=R^{-T}A^T$.

    The class is designed to update a given (possibly empty) QR factorization due to the addition of a new column vector. This is equivalent to constructing an orthonormal basis by the Gram-Schmidt procedure. The class also provides update functionality when the column is removed.

The VectorType template argument may be either a parallel or a serial vector, and only needs to have basic operations such as additions, scalar products, etc. It also needs to have a copy constructor.
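
A minimal usage sketch (assuming two equally sized, linearly independent Vector<double> objects v1 and v2):

ImplicitQR<Vector<double>> qr;
qr.append_column(v1); // start the factorization with the first column
qr.append_column(v2); // grow it by one column at a time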

    @@ -298,8 +298,8 @@

    Remove column and update QR factorization.

Starting from the given QR decomposition $QR= A = [a_1\,\dots a_n], \quad a_i \in R^m$ we aim at computing the factorization of $\tilde Q \tilde R= \tilde A = [a_2\,\dots a_n], \quad a_i \in R^m$.

Note that $\tilde R^T \tilde R = \tilde A^T \tilde A$, where the RHS is included in $A^T A = R^T R$. Therefore $\tilde R$ can be obtained by Cholesky decomposition.

    Implements BaseQR< VectorType >.

    @@ -333,7 +333,7 @@
Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -367,7 +367,7 @@
Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.

    @@ -401,7 +401,7 @@
Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -435,7 +435,7 @@
Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.

    @@ -458,7 +458,7 @@

    Connect a slot to implement a custom check of linear dependency during addition of a column.

Here, u is the last column of the to-be $R$ matrix, rho is its diagonal entry, and col_norm_sqr is the square of the $l_2$ norm of the column. The function should return true if the new column is linearly independent.

    @@ -486,7 +486,7 @@
Apply a Givens rotation in the (i,k)-plane to zero out $R(k,k)$.

    @@ -575,7 +575,7 @@
Solve $Rx=y$. Vectors x and y should be consistent with the current size of the subspace. If transpose is true, $R^Tx=y$ is solved instead.

    @@ -636,7 +636,7 @@
Compute $y=Hx$ where $H$ is the matrix formed by the column vectors stored by this object.

    @@ -695,7 +695,7 @@

Signal used to decide if the new column is linearly dependent.

Here, u is the last column of the to-be $R$ matrix, rho is its diagonal entry, and col_norm_sqr is the square of the $l_2$ norm of the column. The function should return true if the new column is linearly independent.

    Definition at line 430 of file qr.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 2024-01-30 03:04:40.320780339 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 2024-01-30 03:04:40.320780339 +0000 @@ -600,7 +600,7 @@
Add the half-open range $[\text{begin},\text{end})$ to the set of indices represented by this class.

Parameters
    @@ -808,7 +808,7 @@
    [in]beginThe first element of the range to be added.
    [in]endThe past-the-end element of the range to be added.
Return whether the IndexSets are ascending with respect to MPI process number and 1:1, i.e., each index is contained in exactly one IndexSet (among those stored on the different processes), each process stores a contiguous subset of indices, and the index set on process $p+1$ starts at the index one larger than the last one stored on process $p$. In case there is only one MPI process, this just means that the IndexSet is complete.

    Definition at line 878 of file index_set.cc.

    @@ -1118,8 +1118,8 @@
Remove all elements contained in other from this set. In other words, if $x$ is the current object and $o$ the argument, then we compute $x \leftarrow x \backslash o$.

    Definition at line 268 of file index_set.cc.
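
A small sketch of this operation (the index values are chosen arbitrarily for illustration):

IndexSet x(10), o(10);
x.add_range(0, 8);  // x = [0,8)
o.add_range(4, 6);  // o = [4,6)
x.subtract_set(o);  // x now contains [0,4) and [6,8)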

    @@ -1819,7 +1819,7 @@
Create and return an index set of size $N$ that contains every single index within this range. In essence, this function returns an index set created by

IndexSet is (N);
is.add_range(0, N);

This function exists so that one can create and initialize index sets that are complete in one step, or so one can write code like

if (my_index_set == complete_index_set(my_index_set.size()))
    /usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet_1_1IntervalAccessor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet_1_1IntervalAccessor.html 2024-01-30 03:04:40.344780539 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet_1_1IntervalAccessor.html 2024-01-30 03:04:40.344780539 +0000 @@ -141,7 +141,7 @@  

    Detailed Description

Dereferencing an IntervalIterator will return a reference to an object of this type. It allows access to a contiguous interval $[a,b[$ (also called a range) of the IndexSet being iterated over.

    Definition at line 554 of file index_set.h.

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet_1_1IntervalIterator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet_1_1IntervalIterator.html 2024-01-30 03:04:40.364780706 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet_1_1IntervalIterator.html 2024-01-30 03:04:40.364780706 +0000 @@ -139,7 +139,7 @@  

    Detailed Description

Class that represents an iterator pointing to a contiguous interval $[a,b[$ as returned by IndexSet::begin_interval().

    Definition at line 644 of file index_set.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 2024-01-30 03:04:40.400781005 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 2024-01-30 03:04:40.400781005 +0000 @@ -1215,7 +1215,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator.html 2024-01-30 03:04:40.432781272 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator.html 2024-01-30 03:04:40.432781272 +0000 @@ -161,7 +161,7 @@

\[
\eta^2 = \sum_K \eta_K^2
\]

so that $\eta \approx \|\nabla (u-u_h)\|$ for the Laplace equation. The functions of this class compute a vector of values that corresponds to $\eta_K$ (i.e., the square root of the quantity above).

In the paper of Ainsworth $ c_F=\frac {h_K}{24} $, but this factor is a bit esoteric, stemming from interpolation estimates and stability constants which may hold for the Poisson problem, but may not hold for more general situations. Alternatively, we consider the case when $c_F=\frac {h_F}{2p_F}$, where $h_F$ is the diameter of the face and $p_F=\max(p^+,p^-)$ is the maximum polynomial degree of adjacent elements; or $c_F=h_K$. The choice between these factors is done by means of the enumerator, provided as the last argument in all functions.

    To perform the integration, use is made of the FEFaceValues and FESubfaceValues classes. The integration is performed by looping over all cells and integrating over faces that are not yet treated. This way we avoid integration on faces twice, once for each time we visit one of the adjacent cells. In a second loop over all cells, we sum up the contributions of the faces (which are the integrated square of the jumps times some factor) of each cell and take the square root.

    The integration is done using a quadrature formula on the face provided by the caller of the estimate() functions declared by this class. For linear trial functions (FE_Q(1)), QGauss with two points or even the QMidpoint rule might actually suffice. For higher order elements, it is necessary to utilize higher order quadrature formulae with fe.degree+1 Gauss points.
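
A typical calling sketch, following the standard pattern from the deal.II tutorials (dof_handler, fe, solution, and triangulation are assumed to exist; the empty map means no Neumann boundary data):

Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
KellyErrorEstimator<dim>::estimate(dof_handler,
                                   QGauss<dim - 1>(fe.degree + 1),
                                   {},
                                   solution,
                                   estimated_error_per_cell);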

    @@ -215,8 +215,8 @@ - /usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator_3_011_00_01spacedim_01_4.html 2024-01-30 03:04:40.464781539 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator_3_011_00_01spacedim_01_4.html 2024-01-30 03:04:40.464781539 +0000 @@ -163,8 +163,8 @@
Enumerator

cell_diameter_over_24

Kelly error estimator with the factor $\frac{h_K}{24}$.

face_diameter_over_twice_max_degree

the boundary residual estimator with the factor $\frac{h_F}{2\max(p^+,p^-)}$.

cell_diameter

Kelly error estimator with the factor $h_K$.

    - /usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html 2024-01-30 03:04:40.544782205 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html 2024-01-30 03:04:40.544782205 +0000 @@ -1126,7 +1126,7 @@

    Return the dimension of the codomain (or range) space.

Note
The matrix is of dimension $m \times n$.

    Definition at line 1044 of file lapack_full_matrix.h.

    @@ -1157,7 +1157,7 @@

    Return the dimension of the domain space.

Note
The matrix is of dimension $m \times n$.

    Definition at line 1053 of file lapack_full_matrix.h.

    @@ -2327,9 +2327,9 @@
After a call to compute_eigenvalues(), this function returns the $n\times n$ matrix of (right) eigenvectors in a decomposition of the form $A V = V \Lambda$. Note that this function constructs the associated matrix on the fly, since LAPACK packs complex-conjugate eigenvalue/eigenvector pairs of real-valued matrices into a real-valued return matrix. This call only succeeds if the respective flag right_eigenvectors in compute_eigenvalues() has been set to true.

    Definition at line 2065 of file lapack_full_matrix.cc.

    @@ -3886,7 +3886,7 @@
The matrix $\mathbf U$ in the singular value decomposition $\mathbf U \cdot \mathbf S \cdot \mathbf V^T$.

    Definition at line 986 of file lapack_full_matrix.h.

    @@ -3913,7 +3913,7 @@
The matrix $\mathbf V^T$ in the singular value decomposition $\mathbf U \cdot \mathbf S \cdot \mathbf V^T$.

    Definition at line 992 of file lapack_full_matrix.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 2024-01-30 03:04:40.584782538 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 2024-01-30 03:04:40.584782538 +0000 @@ -1103,7 +1103,7 @@
Return the square of the $l_2$-norm.

    Definition at line 544 of file cuda_vector.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 2024-01-30 03:04:40.640783005 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 2024-01-30 03:04:40.640783005 +0000 @@ -314,7 +314,7 @@

    Detailed Description

    template<typename Number>
class LinearAlgebra::ReadWriteVector< Number >

ReadWriteVector is intended to represent vectors in ${\mathbb R}^N$ for which it stores all or a subset of elements. The latter case is important in parallel computations, where $N$ may be so large that no processor can actually store all elements of a solution vector, but where this is also not necessary: one typically only has to store the values of degrees of freedom that live on cells that are locally owned plus potentially those degrees of freedom that live on ghost cells.

This class allows individual elements to be read or written. However, it does not allow global operations such as taking the norm. ReadWriteVector can be used to read and write elements in vectors derived from VectorSpaceVector such as TrilinosWrappers::MPI::Vector and PETScWrappers::MPI::Vector.

    Storing elements

    Most of the time, one will simply read from or write into a vector of the current class using the global numbers of these degrees of freedom. This is done using operator()() or operator[]() which call global_to_local() to transform the global index into a local one. In such cases, it is clear that one can only access elements of the vector that the current object indeed stores.
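
A minimal sketch of this access pattern (the index values are chosen arbitrarily for illustration):

IndexSet stored(100);
stored.add_range(10, 20);
LinearAlgebra::ReadWriteVector<double> rwv(stored);
rwv(12) = 3.14; // a global index; it must be one of the stored elements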

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 2024-01-30 03:04:40.712783605 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 2024-01-30 03:04:40.716783638 +0000 @@ -1402,7 +1402,7 @@
$l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

@@ -1704,7 +1704,7 @@ const bool symmetric = false

Calculate the scalar product between each block of this vector and V and store the result in a full matrix matrix. This function computes the result by forming $A_{ij}=U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element!) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that the inner product results in a square symmetric matrix and almost half of the scalar products can be avoided.

    Obviously, this function can only be used if all blocks of both vectors are of the same size.

    Note
    Internally, a single global reduction will be called to accumulate scalar product between locally owned degrees of freedom.
@@ -1734,7 +1734,7 @@ const bool symmetric = false

Calculate the scalar product between each block of this vector and V using a metric tensor matrix. This function computes the result of $ \sum_{ij} A^{ij} U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that $U_i \cdot V_j$ and $A^{ij}$ are symmetric matrices and almost half of the scalar products can be avoided.

    Obviously, this function can only be used if all blocks of both vectors are of the same size.

    Note
    Internally, a single global reduction will be called to accumulate the scalar product between locally owned degrees of freedom.
@@ -1769,7 +1769,7 @@ const Number b = Number(1.)

Set each block of this vector as follows: $V^i = s V^i + b \sum_{j} U_j A^{ji}$ where $V^i$ and $U_j$ indicate the $i$th block (not element) of $V$ and the $j$th block of $U$, respectively.

    Obviously, this function can only be used if all blocks of both vectors are of the same size.

    @@ -2055,7 +2055,7 @@
Return the $l_2$ norm of the vector (i.e., the square root of the sum of the squares of all entries among all processors).

    Implements LinearAlgebra::VectorSpaceVector< Number >.

    @@ -2077,7 +2077,7 @@
Return the square of the $l_2$ norm of the vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 2024-01-30 03:04:40.792784271 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 2024-01-30 03:04:40.792784271 +0000 @@ -1034,7 +1034,7 @@ const MPI_Comm comm_sm = MPI_COMM_SELF&#href_anchor"memdoc">

    Initialize vector with local_size locally-owned and ghost_size ghost degrees of freedoms.

    The optional argument comm_sm, which consists of processes on the same shared-memory domain, allows users have read-only access to both locally-owned and ghost values of processes combined in the shared-memory communicator. See the general documentation of this class for more information about this argument.

Note
In the created underlying partitioner, the local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively. Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.
    @@ -1199,7 +1199,7 @@

    Initiates communication for the compress() function with non- blocking communication. This function does not wait for the transfer to finish, in order to allow for other computations during the time it takes until all data arrives.

    Before the data is actually exchanged, the function must be followed by a call to compress_finish().

In case this function is called for more than one vector before compress_finish() is invoked, it is mandatory to specify a unique communication channel to each such call, in order to avoid several messages with the same ID that will corrupt this operation. Any communication channel less than 100 is a valid value (in particular, the range $[100, 200)$ is reserved for LinearAlgebra::distributed::BlockVector).

    @@ -1244,7 +1244,7 @@

    Initiates communication for the update_ghost_values() function with non-blocking communication. This function does not wait for the transfer to finish, in order to allow for other computations during the time it takes until all data arrives.

    Before the data is actually exchanged, the function must be followed by a call to update_ghost_values_finish().

In case this function is called for more than one vector before update_ghost_values_finish() is invoked, it is mandatory to specify a unique communication channel to each such call, in order to avoid several messages with the same ID that will corrupt this operation. Any communication channel less than 100 is a valid value (in particular, the range $[100, 200)$ is reserved for LinearAlgebra::distributed::BlockVector).

    @@ -1953,7 +1953,7 @@
Return the $l_2$ norm of the vector (i.e., the square root of the sum of the squares of all entries among all processors).

    Implements LinearAlgebra::VectorSpaceVector< Number >.

    @@ -1975,7 +1975,7 @@
Return the square of the $l_2$ norm of the vector.

    @@ -2720,7 +2720,7 @@
$l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearIndexIterator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearIndexIterator.html 2024-01-30 03:04:40.824784539 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearIndexIterator.html 2024-01-30 03:04:40.824784539 +0000 @@ -173,7 +173,7 @@ std::enable_if_t< std::is_convertible< OtherIterator, DerivedIterator >::value, bool > operator!= (const LinearIndexIterator &left, const OtherIterator &right)

    Detailed Description

    template<class DerivedIterator, class AccessorType>
class LinearIndexIterator< DerivedIterator, AccessorType >

Many classes in deal.II, such as FullMatrix, TransposeTable, and SparseMatrix, store their data in contiguous buffers (though the interpretation of what the elements of these buffers represent can, of course, be complex). For example, FullMatrix and TransposeTable store their data in row major and column major order respectively, whereas for SparseMatrix the mapping from buffer location to matrix entry $\mathbf{A}(i, j)$ is more complicated. In any case, however, the contiguous arrangement of elements enables random access iteration.

    LinearIndexIterator provides most of the functionality needed to write iterators for these classes. LinearIndexIterator is essentially a simplified version of boost::iterator_facade that assumes AccessorType provides certain members (documented below) that completely describe the state of the iterator. The intended use of this class is for containers to define their own accessor classes and then use the curiously recurring template pattern (CRTP) technique to define their iterators. For example, here is a container that uses LinearIndexIterator to define its own iterator classes:

    template <typename T>
    class Container
    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html 2024-01-30 03:04:40.852784771 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html 2024-01-30 03:04:40.852784771 +0000 @@ -224,7 +224,7 @@
std::function< void(Range &v, bool omit_zeroing_entries) > reinit_range_vector

    that store the knowledge how to initialize (resize + internal data structures) an arbitrary vector of the Range and Domain space.

    The primary purpose of this class is to provide syntactic sugar for complex matrix-vector operations and free the user from having to create, set up and handle intermediate storage locations by hand.

As an example, consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possibly different) matrices. In order to construct a LinearOperator op that stores the knowledge of this operation, one can write:

#include <deal.II/lac/linear_operator_tools.h>
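(The rest of the snippet was lost in extraction; the following is a plausible reconstruction of the pattern the text describes. linear_operator() is the real deal.II function, but the variable names A, B, C, and k are assumptions.)

const auto op_a = linear_operator(A);
const auto op_b = linear_operator(B);
const auto op_c = linear_operator(C);
const auto op   = (op_a + k * op_b) * op_c;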
    @@ -240,7 +240,7 @@
Note
This class makes heavy use of std::function objects and lambda functions. This flexibility comes with a run-time penalty. Only use this object to encapsulate matrix objects of medium to large size (as a rule of thumb, sparse matrices with a size $1000\times1000$, or larger).
    In order to use Trilinos or PETSc sparse matrices and preconditioners in conjunction with the LinearOperator class, it is necessary to extend the functionality of the LinearOperator class by means of an additional Payload.

    For example: LinearOperator instances representing matrix inverses usually require calling some linear solver. These solvers may not have interfaces to the LinearOperator (which, for example, may represent a composite operation). The TrilinosWrappers::internal::LinearOperatorImplementation::TrilinosPayload therefore provides an interface extension to the LinearOperator so that it can be passed to the solver and used by the solver as if it were a Trilinos operator. This implies that all of the necessary functionality of the specific Trilinos operator has been overloaded within the Payload class. This includes operator-vector multiplication and inverse operator-vector multiplication, where the operator can be either a TrilinosWrappers::SparseMatrix or a TrilinosWrappers::PreconditionBase and the vector is a native Trilinos vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 2024-01-30 03:04:40.892785104 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 2024-01-30 03:04:40.892785104 +0000 @@ -204,11 +204,11 @@

    In the most essential use of manifolds, manifold descriptions are used to create a "point between other points". For example, when a triangulation creates a new vertex on a cell, face, or edge, it determines the new vertex' coordinates through the following function call:

    ...
    Point<spacedim> new_vertex = manifold.get_new_point (points,weights);
    ...
Here, points is a collection of points in spacedim dimensions, and weights a collection of corresponding weights. The points in this context will then be the vertices of the cell, face, or edge, and the weights are typically one over the number of points when a new midpoint of the cell, face, or edge is needed. Derived classes then will implement the Manifold::get_new_point() function in a way that computes the location of this new point. In the simplest case, for example in the FlatManifold class, the function simply computes the arithmetic average (with given weights) of the given points. However, other classes do something differently; for example, the SphericalManifold class, which is used to describe domains that form (part of) the sphere, will ensure that, given the two vertices of an edge at the boundary, the new returned point will lie on the grand circle that connects the two points, rather than choosing a point that is half-way between the two points in ${\mathbb R}^d$.

    Note
    Unlike almost all other cases in the library, we here interpret the points to be in real space, not on the reference cell.

    Manifold::get_new_point() has a default implementation that can simplify this process somewhat: Internally, the function calls the Manifold::get_intermediate_point() to compute pair-wise intermediate points. Internally the Manifold::get_intermediate_point() calls the Manifold::project_to_manifold() function after computing the convex combination of the given points. This allows derived classes to only overload Manifold::project_to_manifold() for simple situations. This is often useful when describing manifolds that are embedded in higher dimensional space, e.g., the surface of a sphere. In those cases, the desired new point may be computed simply by the (weighted) average of the provided points, projected back out onto the sphere.

    Common use case: Computing tangent vectors

The second use of this class is in computing directions on domains and boundaries. For example, we may need to compute the normal vector to a face in order to impose the no-flow boundary condition $\mathbf u \cdot \mathbf n = 0$ (see the VectorTools::compute_no_normal_flux_constraints() as an example). Similarly, we may need normal vectors in the computation of the normal component of the gradient of the numerical solution in order to compute the jump in the gradient of the solution in error estimators (see, for example, the KellyErrorEstimator class).

    To make this possible, the Manifold class provides a member function (to be implemented by derived classes) that computes a "vector tangent to the manifold at one point, in direction of another point" via the Manifold::get_tangent_vector() function. For example, in 2d, one would use this function with the two vertices of an edge at the boundary to compute a "tangential" vector along the edge, and then get the normal vector by rotation by 90 degrees. In 3d, one would compute the two vectors "tangential" to the two edges of a boundary face adjacent to a boundary vertex, and then take the cross product of these two to obtain a vector normal to the boundary.

    For reasons that are more difficult to understand, these direction vectors are normalized in a very specific way, rather than to have unit norm. See the documentation of Manifold::get_tangent_vector(), as well as below, for more information.
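
A sketch of the 2d recipe just described (a manifold object and the two edge vertices x1 and x2 are assumed to exist; rotating the tangent by 90 degrees yields a normal):

const Tensor<1, 2> t = manifold.get_tangent_vector(x1, x2);
Tensor<1, 2> n;
n[0] = -t[1];
n[1] = t[0]; // normalize afterwards if a unit normal is needed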

    @@ -216,11 +216,11 @@

    A unified description

    The "real" way to understand what this class does is to see it in the framework of differential geometry. More specifically, differential geometry is fundamentally based on the assumption that two sufficiently close points are connected via a line of "shortest distance". This line is called a "geodesic", and it is selected from all other lines that connect the two points by the property that it is shortest if distances are measured in terms of the "metric" that describes a manifold. To give examples, recall that the geodesics of a flat manifold (implemented in the FlatManifold class) are simply the straight lines connecting two points, whereas for spherical manifolds (see the SphericalManifold class) geodesics between two points of same distance are the grand circles, and are in general curved lines when connecting two lines of different distance from the origin.

    In the following discussion, and for the purposes of implementing the current class, the concept of "metrics" that is so fundamental to differential geometry is no longer of great importance to us. Rather, everything can simply be described by postulating the existence of geodesics connecting points on a manifold.

Given geodesics, the operations discussed in the previous two sections can be described in a more formal way. In essence, they rely on the fact that we can assume that a geodesic is parameterized by a "time" like variable $t$ so that $\mathbf s(t)$ describes the curve and so that $\mathbf s(0)$ is the location of the first and $\mathbf s(1)$ the location of the second point. Furthermore, $\mathbf s(t)$ traces out the geodesic at constant speed, covering equal distance in equal time (as measured by the metric). Note that this parameterization uses time, not arc length, to denote progress along the geodesic.

In this picture, computing a mid-point between points $\mathbf x_1$ and $\mathbf x_2$, with weights $w_1$ and $w_2=1-w_1$, simply requires computing the point $\mathbf s(w_1)$. Computing a new point as a weighted average of more than two points can be done by considering pairwise geodesics, finding suitable points on the geodesic between the first two points, then on the geodesic between this new point and the third given point, etc.

Likewise, the "tangential" vector described above is simply the velocity vector, $\mathbf s'(t)$, evaluated at one of the end points of a geodesic (i.e., at $t=0$ or $t=1$). In the case of a flat manifold, the geodesic is simply the straight line connecting two points, and the velocity vector is just the connecting vector in that case. On the other hand, for two points on a spherical manifold, the geodesic is a grand circle, and the velocity vector is tangent to the spherical surface.

Note that if we wanted to, we could use this to compute the length of the geodesic that connects two points $\mathbf x_1$ and $\mathbf x_2$ by computing $\int_0^1 \|\mathbf s'(t)\| dt$ along the geodesic that connects them, but this operation will not be of use to us in practice. One could also conceive of computing the direction vector using the "new point" operation above, using the formula $\mathbf s'(0)=\lim_{w\rightarrow 0} \frac{\mathbf s(w)-\mathbf s(0)}{w}$, where all we need to do is compute the new point $\mathbf s(w)$ with weights $w$ and $1-w$ along the geodesic connecting $\mathbf x_1$ and $\mathbf x_2$. The default implementation of the function does this, by evaluating the quotient for a small but finite weight $w$. In practice, however, it is almost always possible to explicitly compute the direction vector, i.e., without the need to numerically approximate the limit process, and derived classes should do so.

    Definition at line 286 of file manifold.h.

    Member Typedef Documentation

    @@ -648,11 +648,11 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 2024-01-30 03:04:40.948785571 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 2024-01-30 03:04:40.952785604 +0000 @@ -229,84 +229,84 @@ class Mapping< dim, spacedim >

    Abstract base class for mapping classes.

    This class declares the interface for the functionality to describe mappings from the reference (unit) cell to a cell in real space, as well as for filling the information necessary to use the FEValues, FEFaceValues, and FESubfaceValues classes. Concrete implementations of these interfaces are provided in derived classes.

    Mathematics of the mapping

    The mapping is a transformation $\mathbf x = \mathbf F_K(\hat{\mathbf x})$ which maps points $\hat{\mathbf x}$ in the reference cell $[0,1]^\text{dim}$ to points $\mathbf x$ in the actual grid cell $K\subset{\mathbb R}^\text{spacedim}$. Many of the applications of such mappings require the Jacobian of this mapping, $J(\hat{\mathbf x}) = \hat\nabla {\mathbf F}_K(\hat{\mathbf x})$. For instance, if dim=spacedim=2, we have

    \[
 J(\hat{\mathbf x}) = \left(\begin{matrix}
 \frac{\partial x}{\partial \hat x} & \frac{\partial x}{\partial \hat y}
 \\
 \frac{\partial y}{\partial \hat x} & \frac{\partial y}{\partial \hat y}
 \end{matrix}\right)
    \]

    Mapping of scalar functions

    The shape functions of scalar finite elements are typically defined on a reference cell and are then simply mapped according to the rule

    \[
 \varphi(\mathbf x) = \varphi\bigl(\mathbf F_K(\hat{\mathbf x})\bigr)
 = \hat \varphi(\hat{\mathbf x}).
    \]

    Mapping of integrals

    Using simply a change of variables, integrals of scalar functions over a cell $K$ can be expressed as an integral over the reference cell $\hat K$. Specifically, the volume form $d\hat x$ is transformed so that

    \[
  \int_K u(\mathbf x)\,dx = \int_{\hat K} \hat
 u(\hat{\mathbf x}) \left|\text{det}J(\hat{\mathbf x})\right|
 \,d\hat x.
    \]

    In expressions where such integrals are approximated by quadrature, this then leads to terms of the form

    \[
  \int_K u(\mathbf x)\,dx
  \approx
  \sum_{q}
  \hat u(\hat{\mathbf x}_q)
  \underbrace{\left|\text{det}J(\hat{\mathbf x}_q)\right| w_q}_{=:
 \text{JxW}_q}.
    \]

    Here, the weights $\text{JxW}_q$ of each quadrature point (where JxW mnemonically stands for Jacobian times Quadrature Weights) take the role of the $dx$ in the original integral. Consequently, they appear in all code that computes integrals approximated by quadrature, and are accessed by FEValues::JxW().
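
    In practice, the per-cell quadrature loop of any assembly or integration routine boils down to the sum above; a minimal sketch (the function name and the u_values argument are illustrative, not library API):

    #include <deal.II/fe/fe_values.h>
    #include <vector>

    // Quadrature approximation of the integral of u over the current cell,
    // using the JxW_q weights described above. 'fe_values' must have been
    // reinit()ed with update_JxW_values; 'u_values' holds u at the
    // quadrature points.
    template <int dim>
    double integrate_on_cell(const dealii::FEValues<dim> &fe_values,
                             const std::vector<double>   &u_values)
    {
      double integral = 0.;
      for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
        integral += u_values[q] * fe_values.JxW(q);
      return integral;
    }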

    Todo
    Document what happens in the codimension-1 case.

    Mapping of vector fields, differential forms and gradients of vector fields

    The transformation of vector fields or differential forms (gradients of scalar functions) $\mathbf v$, and gradients of vector fields $\mathbf T$ follows the general form

    \[
 \mathbf v(\mathbf x) = \mathbf A(\hat{\mathbf x})
 \hat{\mathbf v}(\hat{\mathbf x}),
 \qquad
 \mathbf T(\mathbf x) = \mathbf A(\hat{\mathbf x})
 \hat{\mathbf T}(\hat{\mathbf x}) \mathbf B(\hat{\mathbf x}).
    \]

    The differential forms A and B are determined by the kind of object being transformed. These transformations are performed through the transform() functions, and the type of object being transformed is specified by their MappingKind argument. See the documentation there for possible choices.

    Derivatives of the mapping

    Some applications require the derivatives of the mapping, of which the first order derivative is the mapping Jacobian, $J_{iJ}(\hat{\mathbf x})=\frac{\partial x_i}{\partial \hat x_J}$, described above. Higher order derivatives of the mapping are similarly defined, for example the Jacobian derivative, $\hat H_{iJK}(\hat{\mathbf x}) = \frac{\partial^2 x_i}{\partial \hat x_J \partial \hat x_K}$, and the Jacobian second derivative, $\hat K_{iJKL}(\hat{\mathbf x}) = \frac{\partial^3 x_i}{\partial \hat x_J \partial \hat x_K \partial \hat x_L}$. It is also useful to define the "pushed-forward" versions of the higher order derivatives: the Jacobian pushed-forward derivative, $H_{ijk}(\hat{\mathbf x}) = \frac{\partial^2 x_i}{\partial \hat x_J \partial \hat x_K}(J_{jJ})^{-1}(J_{kK})^{-1}$, and the Jacobian pushed-forward second derivative, $K_{ijkl}(\hat{\mathbf x}) = \frac{\partial^3 x_i}{\partial \hat x_J \partial \hat x_K \partial \hat x_L}(J_{jJ})^{-1}(J_{kK})^{-1}(J_{lL})^{-1}$. These pushed-forward versions can be used to compute the higher order derivatives of functions defined on the reference cell with respect to the real cell coordinates. For instance, the Jacobian derivative with respect to the real cell coordinates is given by:

    \[
 \frac{\partial}{\partial x_j}\left[J_{iJ}(\hat{\mathbf x})\right] =
 H_{ikn}(\hat{\mathbf x})J_{nJ}(\hat{\mathbf x}),
    \]

    and the derivative of the Jacobian inverse with respect to the real cell coordinates is similarly given by:

    \[
 \frac{\partial}{\partial x_j}\left[\left(J_{iJ}(\hat{\mathbf
 x})\right)^{-1}\right] = -H_{nik}(\hat{\mathbf x})\left(J_{nJ}(\hat{\mathbf
 x})\right)^{-1}.
    \]

    In a similar fashion, higher order derivatives, with respect to the real cell coordinates, of functions defined on the reference cell can be defined using the Jacobian pushed-forward higher-order derivatives. For example, the derivative, with respect to the real cell coordinates, of the Jacobian pushed-forward derivative is given by:

    \[
 \frac{\partial}{\partial x_l}\left[H_{ijk}(\hat{\mathbf x})\right] =
 K_{ijkl}(\hat{\mathbf x}) - H_{mjl}(\hat{\mathbf x})H_{imk}(\hat{\mathbf
 x}) - H_{mkl}(\hat{\mathbf x})H_{imj}(\hat{\mathbf x}).
    \]

    References

    A general publication on differential geometry and finite elements is the survey

      @@ -987,10 +987,10 @@
    x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

    Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

    Conceptually, this function represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\hat{\mathbf x}\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

    • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
    • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$, or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.

    The information computed by this function is used to fill the various member variables of the output argument of this function. Which of the member variables of that structure should be filled is determined by the update flags stored in the Mapping::InternalDataBase object passed to this function.

    An extensive discussion of the interaction between this function and FEValues can be found in the How Mapping, FiniteElement, and FEValues work together documentation module.
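
    From the user's side this machinery stays hidden behind FEValues; a minimal sketch of the call sequence (element, quadrature rule, and update flags are chosen purely for illustration):

    #include <deal.II/base/quadrature_lib.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/fe/fe_values.h>
    #include <deal.II/fe/mapping_q1.h>

    // Constructing an FEValues object and calling reinit() on a cell is
    // what triggers Mapping::fill_fe_values() (and, after it,
    // FiniteElement::fill_fe_values()) behind the scenes.
    template <typename CellIterator>
    void reinit_example(const CellIterator &cell)
    {
      const dealii::MappingQ1<2> mapping;
      const dealii::FE_Q<2>      fe(1);
      const dealii::QGauss<2>    quadrature(2);
      dealii::FEValues<2>        fe_values(mapping, fe, quadrature,
                                           dealii::update_quadrature_points |
                                             dealii::update_JxW_values);
      fe_values.reinit(cell); // mapping data for this cell is computed here
    }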

    @@ -1249,37 +1249,37 @@

    The mapping kinds currently implemented by derived classes are:

    • mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

      \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})\hat{\mathbf u}(\hat{\mathbf x}).
      \]

      In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

    • mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

      \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}\hat{\mathbf u}(\hat{\mathbf x}).
      \]

      Gradients of scalar differentiable functions are transformed this way.

      In the case when dim=spacedim the previous formula reduces to

      \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})^{-T}\hat{\mathbf u}(\hat{\mathbf x})
      \]

      because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix (see the sketch after this list).

    • mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

      \[
  \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x}).
      \]
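
    As referenced in the covariant item above, here is the dim=spacedim covariant rule spelled out with deal.II tensors. This is an illustrative sketch only; the helper name is not library API, and in real code Mapping::transform() or FEValues performs this transformation:

    #include <deal.II/base/tensor.h>

    // The dim=spacedim covariant rule u = J^{-T} u_hat, written out
    // explicitly with deal.II rank-1 and rank-2 tensors.
    template <int dim>
    dealii::Tensor<1, dim>
    apply_covariant(const dealii::Tensor<2, dim> &J,
                    const dealii::Tensor<1, dim> &u_hat)
    {
      // transpose(invert(J)) * u_hat is exactly J^{-T} u_hat
      return dealii::transpose(dealii::invert(J)) * u_hat;
    }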

    @@ -1331,21 +1331,21 @@
    Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf T} = \hat \nabla \hat{\mathbf u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

    • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

      \[
 \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
                        J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}.
      \]

      Jacobians of spacedim-vector valued differentiable functions are transformed this way.

      In the case when dim=spacedim the previous formula reduces to

      \[
 \mathbf T(\mathbf x) = \hat{\mathbf u}(\hat{\mathbf x})
                        J(\hat{\mathbf x})^{-1}.
      \]

      /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html differs (HTML document, ASCII text, with very long lines)
      --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html	2024-01-30 03:04:41.020786171 +0000
      +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html	2024-01-30 03:04:41.020786171 +0000
      @@ -789,37 +789,37 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})\hat{\mathbf u}(\hat{\mathbf x}).
        \]

        In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

      • mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}\hat{\mathbf u}(\hat{\mathbf x}).
        \]

        Gradients of scalar differentiable functions are transformed this way.

        In the case when dim=spacedim the previous formula reduces to

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})^{-T}\hat{\mathbf u}(\hat{\mathbf x})
        \]

        because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix.

      • mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

        \[
  \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x}).
        \]

      @@ -873,21 +873,21 @@
    Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf T} = \hat \nabla \hat{\mathbf u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

      • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        \[
 \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
                        J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}.
        \]

        Jacobians of spacedim-vector valued differentiable functions are transformed this way.

        In the case when dim=spacedim the previous formula reduces to

        \[
 \mathbf T(\mathbf x) = \hat{\mathbf u}(\hat{\mathbf x})
                        J(\hat{\mathbf x})^{-1}.
        \]

      @@ -944,35 +944,35 @@

    Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

    • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

      \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
      \]

    • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that (see the sketch after this list)

      \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
      \]

    • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

      \[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
      \]
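
    The covariant-gradient rule referenced above, written out with deal.II rank-2 tensors; a sketch only, the helper name is not library API and real code obtains this through Mapping::transform():

    #include <deal.II/base/tensor.h>

    // The mapping_covariant_gradient rule T = J^{-T} T_hat J^{-1},
    // spelled out explicitly.
    template <int dim>
    dealii::Tensor<2, dim>
    apply_covariant_gradient(const dealii::Tensor<2, dim> &J,
                             const dealii::Tensor<2, dim> &T_hat)
    {
      const dealii::Tensor<2, dim> J_inv = dealii::invert(J);
      return dealii::transpose(J_inv) * T_hat * J_inv;
    }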

    @@ -1031,21 +1031,21 @@

    The mapping kinds currently implemented by derived classes are:

    • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

      \[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x})
 J_{jJ}^{\dagger} J_{kK}^{\dagger},
      \]

      where

      \[
 J^{\dagger} = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}.
      \]

    Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

    In the case when dim=spacedim the previous formula reduces to

    \[J^{\dagger} = J^{-1}\]

    Parameters
    @@ -1097,40 +1097,40 @@
    Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

    The mapping kinds currently implemented by derived classes are:

    • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

      \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
 J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
      \]

    • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

      \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
 J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
      \]

    • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

      \[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
 J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
      \]
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html	2024-01-30 03:04:41.080786671 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html	2024-01-30 03:04:41.080786671 +0000
@@ -219,9 +219,9 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      class MappingCartesian< dim, spacedim >

      A class providing a mapping from the reference cell to cells that are axiparallel, i.e., that have the shape of rectangles (in 2d) or boxes (in 3d) with edges parallel to the coordinate directions. The class therefore provides functionality that is equivalent to what, for example, MappingQ would provide for such cells. However, knowledge of the shape of cells allows this class to be substantially more efficient.

      Specifically, the mapping is meant for cells for which the mapping from the reference to the real cell is a scaling along the coordinate directions: The transformation from reference coordinates $\hat {\mathbf x}$ to real coordinates $\mathbf x$ on each cell is of the form

      \begin{align*}
   {\mathbf x}(\hat {\mathbf x})
   =
   \begin{pmatrix}
     h_x & 0 \\
     0 & h_y
   \end{pmatrix}
   \hat{\mathbf x}
   + {\mathbf v}_0
      \end{align*}

      in 2d, and

      \begin{align*}
   {\mathbf x}(\hat {\mathbf x})
   =
   \begin{pmatrix}
     h_x & 0 & 0 \\
     0 & h_y & 0 \\
     0 & 0 & h_z
   \end{pmatrix}
   \hat{\mathbf x}
   + {\mathbf v}_0
      \end{align*}

      in 3d, where ${\mathbf v}_0$ is the bottom left vertex and $h_x,h_y,h_z$ are the extents of the cell along the axes.

      The class is intended for efficiency, and it does not do a whole lot of error checking. If you apply this mapping to a cell that does not conform to the requirements above, you will get strange results.

      Definition at line 79 of file mapping_cartesian.h.
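
      As an illustration of the axiparallel form above, the 2d transformation amounts to nothing more than the following (a hypothetical helper, not part of the class; MappingCartesian does this internally):

      #include <deal.II/base/point.h>

      // Scale the reference point by the cell extents and shift by the
      // bottom-left vertex: x = diag(h_x, h_y) * x_hat + v_0.
      inline dealii::Point<2> map_cartesian(const dealii::Point<2> &x_hat,
                                            const dealii::Point<2> &v_0,
                                            const double h_x,
                                            const double h_y)
      {
        return dealii::Point<2>(v_0[0] + h_x * x_hat[0],
                                v_0[1] + h_y * x_hat[1]);
      }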

      @@ -520,37 +520,37 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})\hat{\mathbf u}(\hat{\mathbf x}).
        \]

        In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

      • mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}\hat{\mathbf u}(\hat{\mathbf x}).
        \]

        Gradients of scalar differentiable functions are transformed this way.

        In the case when dim=spacedim the previous formula reduces to

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})^{-T}\hat{\mathbf u}(\hat{\mathbf x})
        \]

        because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix.

      • mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

        \[
  \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x}).
        \]

      @@ -604,21 +604,21 @@
      Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf T} = \hat \nabla \hat{\mathbf u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
                        J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}.
          \]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf u}(\hat{\mathbf x})
                        J(\hat{\mathbf x})^{-1}.
          \]

        @@ -675,35 +675,35 @@

      Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
        \]

      • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
        \]

      • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

        \[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
        \]

      @@ -762,21 +762,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        \[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x})
 J_{jJ}^{\dagger} J_{kK}^{\dagger},
        \]

        where

        \[
 J^{\dagger} = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}.
        \]

      Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

      In the case when dim=spacedim the previous formula reduces to

      \[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -828,40 +828,40 @@
      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

      /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html differs (HTML document, ASCII text, with very long lines)
      --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html	2024-01-30 03:04:41.136787137 +0000
      +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html	2024-01-30 03:04:41.136787137 +0000
      @@ -615,37 +615,37 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

          \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})\hat{\mathbf u}(\hat{\mathbf x}).
          \]

          In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

        • mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

          \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}\hat{\mathbf u}(\hat{\mathbf x}).
          \]

          Gradients of scalar differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})^{-T}\hat{\mathbf u}(\hat{\mathbf x})
          \]

          because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix.

        • mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

          \[
  \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x}).
          \]

        @@ -699,21 +699,21 @@
      Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf T} = \hat \nabla \hat{\mathbf u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
                        J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}.
          \]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf u}(\hat{\mathbf x})
                        J(\hat{\mathbf x})^{-1}.
          \]

        @@ -770,35 +770,35 @@

      Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
        \]

      • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
        \]

      • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

        \[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
        \]

      @@ -857,21 +857,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        \[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x})
 J_{jJ}^{\dagger} J_{kK}^{\dagger},
        \]

        where

        \[
 J^{\dagger} = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}.
        \]

      Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

      In the case when dim=spacedim the previous formula reduces to

      \[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -923,40 +923,40 @@
      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
 J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
        \]

      • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
 J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
        \]

      • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
 J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
        \]
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html	2024-01-30 03:04:41.200787671 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html	2024-01-30 03:04:41.200787671 +0000
@@ -688,37 +688,37 @@
 The mapping kinds currently implemented by derived classes are:

 • mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

       \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})\hat{\mathbf u}(\hat{\mathbf x}).
       \]

       In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

     • mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

       \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}\hat{\mathbf u}(\hat{\mathbf x}).
       \]

       Gradients of scalar differentiable functions are transformed this way.

       In the case when dim=spacedim the previous formula reduces to

       \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})^{-T}\hat{\mathbf u}(\hat{\mathbf x})
       \]

       because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix.

     • mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

       \[
  \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x}).
       \]

      @@ -772,21 +772,21 @@
     Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf T} = \hat \nabla \hat{\mathbf u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

       • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

         \[
 \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
                        J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}.
         \]

         Jacobians of spacedim-vector valued differentiable functions are transformed this way.

         In the case when dim=spacedim the previous formula reduces to

         \[
 \mathbf T(\mathbf x) = \hat{\mathbf u}(\hat{\mathbf x})
                        J(\hat{\mathbf x})^{-1}.
         \]

        @@ -843,35 +843,35 @@

     Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

     • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

       \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
       \]

     • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

       \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
       \]

     • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

       \[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
       \]

      @@ -930,21 +930,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

       \[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x})
 J_{jJ}^{\dagger} J_{kK}^{\dagger},
       \]

       where

       \[
 J^{\dagger} = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}.
       \]

     Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

     In the case when dim=spacedim the previous formula reduces to

     \[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -996,40 +996,40 @@
     Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

     The mapping kinds currently implemented by derived classes are:

     • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

       \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
 J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
       \]

     • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

       \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
 J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
       \]

     • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

       \[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
 J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
       \]
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html	2024-01-30 03:04:41.232787937 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html	2024-01-30 03:04:41.232787937 +0000
@@ -666,7 +666,7 @@
        Tensors of contravariant transformation at each of the quadrature points. The contravariant matrix is the Jacobian of the transformation, i.e. $J_{ij}=dx_i/d\hat x_j$.

        Computed on each cell.

        Definition at line 458 of file mapping_fe_field.h.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html differs (HTML document, ASCII text, with very long lines)
        --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html	2024-01-30 03:04:41.260788171 +0000
        +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html	2024-01-30 03:04:41.260788171 +0000
        @@ -739,7 +739,7 @@
      Tensors of contravariant transformation at each of the quadrature points. The contravariant matrix is the Jacobian of the transformation, i.e. $J_{ij}=dx_i/d\hat x_j$.

      Computed on each cell.

      Definition at line 371 of file mapping_fe.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html differs (HTML document, ASCII text, with very long lines)
      --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html	2024-01-30 03:04:41.312788603 +0000
      +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html	2024-01-30 03:04:41.312788603 +0000
      @@ -533,37 +533,37 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})\hat{\mathbf u}(\hat{\mathbf x}).
        \]

        In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

      • mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}\hat{\mathbf u}(\hat{\mathbf x}).
        \]

        Gradients of scalar differentiable functions are transformed this way.

        In the case when dim=spacedim the previous formula reduces to

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf x})^{-T}\hat{\mathbf u}(\hat{\mathbf x})
        \]

        because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix.

      • mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

        \[
  \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x}).
        \]

      @@ -617,21 +617,21 @@
      Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf T} = \hat \nabla \hat{\mathbf u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
                        J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}.
          \]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf u}(\hat{\mathbf x})
                        J(\hat{\mathbf x})^{-1}.
          \]

        @@ -688,35 +688,35 @@

      Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
        \]

      • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
        \]

      • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

        \[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x})
 J(\hat{\mathbf x})^{-1}.
        \]

      @@ -775,21 +775,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        \[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x})
 J_{jJ}^{\dagger} J_{kK}^{\dagger},
        \]

        where

        \[
 J^{\dagger} = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}.
        \]

      Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

      In the case when dim=spacedim the previous formula reduces to

      \[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -841,40 +841,40 @@
      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf  u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
\]

      • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
\]

      • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html	2024-01-30 03:04:41.336788804 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html	2024-01-30 03:04:41.336788804 +0000
@@ -427,7 +427,7 @@
        A vector of weights for use in Manifold::get_new_point(). For each point (interior to a cell), we compute the weight each vertex has for this point. If the point lies at a vertex, then this vertex has weight one and all others have weight zero. If the point lies interior to a cell, then the weight every vertex has is just the $d$-linear shape functions associated with each vertex evaluated at that point.

        This array has size GeometryInfo<dim>::vertices_per_cell, but it can't be converted into a fixed size array because it is used as input for Manifold::get_new_point() which wants to see a std::vector<double> for the weights.

        Definition at line 270 of file mapping_manifold.h.
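        A sketch of how such weights can be computed for a point given in reference coordinates, assuming deal.II's GeometryInfo interface (the function name is illustrative):

        #include <deal.II/base/geometry_info.h>
        #include <deal.II/base/point.h>
        #include <vector>

        using namespace dealii;

        // Weight of each vertex = its d-linear shape function at p_unit.
        // At a vertex, one weight is 1 and the others are 0; the weights
        // always sum to one.
        template <int dim>
        std::vector<double> vertex_weights(const Point<dim> &p_unit)
        {
          std::vector<double> w(GeometryInfo<dim>::vertices_per_cell);
          for (unsigned int v = 0; v < GeometryInfo<dim>::vertices_per_cell; ++v)
            w[v] = GeometryInfo<dim>::d_linear_shape_function(p_unit, v);
          return w;
        }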

        @@ -504,7 +504,7 @@
      Tensors of contravariant transformation at each of the quadrature points. The contravariant matrix is the Jacobian of the transformation, i.e. $J_{ij}=dx_i/d\hat x_j$.

      Computed on each cell.

      Definition at line 307 of file mapping_manifold.h.
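      On the user side, the same Jacobians can be inspected through FEValues when update_jacobians is requested; a sketch under the assumption that mapping, fe, quadrature and dof_handler are set up elsewhere:

      #include <deal.II/dofs/dof_handler.h>
      #include <deal.II/fe/fe_values.h>

      using namespace dealii;

      template <int dim>
      void inspect_jacobians(const Mapping<dim>       &mapping,
                             const FiniteElement<dim> &fe,
                             const Quadrature<dim>    &quadrature,
                             const DoFHandler<dim>    &dof_handler)
      {
        FEValues<dim> fe_values(mapping, fe, quadrature, update_jacobians);
        for (const auto &cell : dof_handler.active_cell_iterators())
          {
            fe_values.reinit(cell);
            for (unsigned int q = 0; q < quadrature.size(); ++q)
              {
                // J[i][j] = dx_i / dx_hat_j at quadrature point q
                const DerivativeForm<1, dim, dim> &J = fe_values.jacobian(q);
                (void)J;
              }
          }
      }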

      /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html 2024-01-30 03:04:41.400789337 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html 2024-01-30 03:04:41.400789337 +0000 @@ -253,7 +253,7 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      class MappingQ< dim, spacedim >

      This class implements the functionality for polynomial mappings $Q_p$ of polynomial degree $p$ that will be used on all cells of the mesh. In order to get a genuine higher-order mapping for all cells, it is important to provide information about how interior edges and faces of the mesh should be curved. This is typically done by associating a Manifold with interior cells and edges. A simple example of this is discussed in the "Results" section of step-6; a full discussion of manifolds is provided in step-53. If manifolds are only attached to the boundaries of a domain, the current class with higher polynomial degrees will provide the same information as a mere MappingQ1 object. If you are working on meshes that describe a (curved) manifold embedded in higher space dimensions, i.e., if dim!=spacedim, such that every cell is at the boundary of the domain, you will likely already have attached a manifold object to all cells, which can then also be used by the mapping classes for higher order mappings.

      Behavior along curved boundaries and with different manifolds

      For a number of applications, one only knows a manifold description of a surface but not the interior of the computational domain. In such a case, a FlatManifold object will be assigned to the interior entities that describes a usual planar coordinate system where the additional points for the higher order mapping are placed exactly according to a bi-/trilinear mapping. When combined with a non-flat manifold on the boundary, for example a circle bulging into the interior of a square cell, the two manifold descriptions are in general incompatible. For example, a FlatManifold defined solely through the cell's vertices would put an interior point located at some small distance epsilon away from the boundary along a straight line and thus in general outside the concave part of a circle. If the polynomial degree of MappingQ is sufficiently high, the transformation from the reference cell to such a cell would in general contain inverted regions close to the boundary.

      In order to avoid this situation, this class applies an algorithm for making this transition smooth using a so-called transfinite interpolation that is essentially a linear blend between the descriptions along the surrounding entities. In the algorithm that computes additional points, the compute_mapping_support_points() method, all the entities of the cells are passed through hierarchically, starting from the lines to the quads and finally hexes. Points on objects higher up in the hierarchy are obtained from the manifold associated with that object, taking into account all the points previously computed by the manifolds associated with the lower-dimensional objects, not just the vertices. If only a line is assigned a curved boundary but the adjacent quad is on a flat manifold, the flat manifold on the quad will take the points on the deformed line into account when interpolating the position of the additional points inside the quad and thus always result in a well-defined transformation.
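      A minimal sketch of this setup (circle-bounded disc, transfinite blending in the interior, cubic mapping), following the pattern from step-65; the manifold ids 0 and 1 are chosen arbitrarily here:

      #include <deal.II/fe/mapping_q.h>
      #include <deal.II/grid/grid_generator.h>
      #include <deal.II/grid/manifold_lib.h>
      #include <deal.II/grid/tria.h>

      using namespace dealii;

      int main()
      {
        constexpr int dim = 2;

        Triangulation<dim> tria;
        GridGenerator::hyper_ball(tria);

        // Curved description on the boundary, transfinite blend inside.
        tria.set_all_manifold_ids(1);
        tria.set_all_manifold_ids_on_boundary(0);
        tria.set_manifold(0, SphericalManifold<dim>());

        TransfiniteInterpolationManifold<dim> transfinite;
        transfinite.initialize(tria);
        tria.set_manifold(1, transfinite);

        // Degree-3 mapping; it only becomes genuinely curved in the
        // interior because of the manifolds attached above.
        const MappingQ<dim> mapping(3);
      }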

      @@ -700,37 +700,37 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})\hat{\mathbf  u}(\hat{\mathbf
 x}).
\]

        In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

      • mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}\hat{\mathbf u}(\hat{\mathbf  x}).
\]

        Gradients of scalar differentiable functions are transformed this way.

        In the case when dim=spacedim the previous formula reduces to

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})^{-T}\hat{\mathbf
 u}(\hat{\mathbf  x})
\]

        because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix.

      • mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

        \[
  \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf  u}(\hat{\mathbf x}).
\]

      @@ -784,21 +784,21 @@
      Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
\]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
\]

        @@ -855,35 +855,35 @@

      Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf  u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf  u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

        \[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      @@ -942,21 +942,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        \[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
 J_{jJ}^{\dagger} J_{kK}^{\dagger},
\]

        where

        \[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
\]

      Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

      In the case when dim=spacedim the previous formula reduces to

      \[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -1008,40 +1008,40 @@
      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf  u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
\]

      • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
\]

      • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html	2024-01-30 03:04:41.468789903 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html	2024-01-30 03:04:41.468789903 +0000
@@ -239,8 +239,8 @@

        Detailed Description

        template<int dim, int spacedim = dim>
        class MappingQ1< dim, spacedim >

        Implementation of a $d$-linear mapping from the reference cell to a general quadrilateral/hexahedron.

        The mapping implemented by this class maps the reference (unit) cell to a general grid cell with straight lines in $d$ dimensions. (Note, however, that in 3d the faces of a general, trilinearly mapped cell may be curved, even if the edges are not.) This is the standard mapping used for polyhedral domains. It is also the mapping used throughout deal.II for many functions that come in two variants, one that allows to pass a mapping argument explicitly and one that simply falls back to the MappingQ1 class declared here. (Or, in fact, to an object of kind MappingQ(1), which implements exactly the functionality of this class.)

        The shape functions for this mapping are the same as for the finite element FE_Q of polynomial degree 1. Therefore, coupling these two yields an isoparametric element.

        Note
        This class is, in all reality, nothing more than a different name for calling MappingQ with a polynomial degree of one as argument.
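        A two-line illustration of that note; both objects behave identically wherever a Mapping argument is accepted:

        #include <deal.II/fe/mapping_q.h>
        #include <deal.II/fe/mapping_q1.h>

        using namespace dealii;

        int main()
        {
          constexpr int dim = 2;

          const MappingQ1<dim> mapping_a;                          // d-linear mapping
          const MappingQ<dim>  mapping_b(/*polynomial degree=*/1); // the same thing
        }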
        @@ -646,37 +646,37 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})\hat{\mathbf  u}(\hat{\mathbf
 x}).
\]

          In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

        • mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}\hat{\mathbf u}(\hat{\mathbf  x}).
\]

          Gradients of scalar differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})^{-T}\hat{\mathbf
 u}(\hat{\mathbf  x})
\]

          because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix.

        • mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

        \[
  \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf  u}(\hat{\mathbf x}).
\]

        @@ -730,21 +730,21 @@
      Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
\]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
\]

        @@ -801,35 +801,35 @@

      Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf  u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf  u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

        \[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      @@ -888,21 +888,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        \[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
 J_{jJ}^{\dagger} J_{kK}^{\dagger},
\]

        where

        \[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
\]

      Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

      In the case when dim=spacedim the previous formula reduces to

      \[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -954,40 +954,40 @@
      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf  u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
\]

      • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
\]

      /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html 2024-01-30 03:04:41.536790470 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html 2024-01-30 03:04:41.536790470 +0000 @@ -244,7 +244,7 @@

        Detailed Description

        template<int dim, typename VectorType = Vector<double>, int spacedim = dim>
        class MappingQ1Eulerian< dim, VectorType, spacedim >

        This class provides a mapping that adds to the location of each cell a $d$-linear displacement field. (The generalization to higher order polynomials is provided in the MappingQEulerian class.) Each cell is thus shifted in space by values given to the mapping through a finite element field.

        Usage

        The constructor of this class takes two arguments: a reference to the vector that defines the mapping from the reference configuration to the current configuration and a reference to the DoFHandler. The vector should then represent a (flattened out version of a) vector valued field defined at nodes defined by the DoFHandler, where the number of components of the vector field equals the number of space dimensions. Thus, the DoFHandler shall operate on a finite element that has as many components as space dimensions. As an additional requirement, we impose that it have as many degrees of freedom per vertex as there are space dimensions; since this object only evaluates the finite element field at the vertices, the values of all other degrees of freedom (not associated to vertices) are ignored. These requirements are met if the finite element which the given DoFHandler operates on is constructed as a system element (FESystem) from dim continuous FE_Q() objects.

        In many cases, the shift vector will also be the solution vector of the problem under investigation. If this is not the case (i.e. the number of components of the solution variable is not equal to the space dimension, e.g. for scalar problems in dim>1 where the Eulerian coordinates only give a background field, or for coupled problems where more variables are computed than just the flow field), then a different DoFHandler has to be set up on the given triangulation, and the shift vector has then to be associated to it.
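        A minimal sketch of that setup: an FESystem of dim FE_Q(1) components, one displacement per space direction, with a zero vector standing in for a real displacement field. The argument order below follows the deal.II 9.5 header (DoFHandler first):

        #include <deal.II/dofs/dof_handler.h>
        #include <deal.II/fe/fe_q.h>
        #include <deal.II/fe/fe_system.h>
        #include <deal.II/fe/mapping_q1_eulerian.h>
        #include <deal.II/grid/grid_generator.h>
        #include <deal.II/grid/tria.h>
        #include <deal.II/lac/vector.h>

        using namespace dealii;

        int main()
        {
          constexpr int dim = 2;

          Triangulation<dim> tria;
          GridGenerator::hyper_cube(tria);

          const FESystem<dim> fe(FE_Q<dim>(1), dim); // dim displacement components
          DoFHandler<dim>     dof_handler(tria);
          dof_handler.distribute_dofs(fe);

          Vector<double> displacement(dof_handler.n_dofs()); // all zero here

          const MappingQ1Eulerian<dim, Vector<double>> mapping(dof_handler,
                                                               displacement);
        }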

        @@ -782,37 +782,37 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})\hat{\mathbf  u}(\hat{\mathbf
 x}).
\]

          In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

        • mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}\hat{\mathbf u}(\hat{\mathbf  x}).
\]

          Gradients of scalar differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})^{-T}\hat{\mathbf
 u}(\hat{\mathbf  x})
\]

          because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix.

        • mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

        \[
  \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf  u}(\hat{\mathbf x}).
\]

        @@ -866,21 +866,21 @@
      Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
\]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
\]

        @@ -937,35 +937,35 @@

      Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf  u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf  u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

        \[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      @@ -1024,21 +1024,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        \[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
 J_{jJ}^{\dagger} J_{kK}^{\dagger},
\]

        where

        \[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
\]

      Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

      In the case when dim=spacedim the previous formula reduces to

      \[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -1090,40 +1090,40 @@
      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf  u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
\]

      • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
\]

      • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html	2024-01-30 03:04:41.608791070 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html	2024-01-30 03:04:41.608791070 +0000
@@ -529,7 +529,7 @@
           const std::function< std::vector< Point< spacedim > >(const typename Triangulation< dim, spacedim >::cell_iterator &)> & compute_points_on_cell

        Initialize the data cache by letting the function given as an argument provide the mapping support points for all cells (on all levels) of the given triangulation. The function must return a vector of Point<spacedim> whose length is the same as the size of the polynomial space, $(p+1)^\text{dim}$, where $p$ is the polynomial degree of the mapping, and it must be in the order the mapping or FE_Q sort their points, i.e., all $2^\text{dim}$ vertex points first, then the points on the lines, quads, and hexes according to the usual hierarchical numbering. No attempt is made to validate these points internally, except for the number of given points.

        Note
        If multiple threads are enabled, this function will run in parallel, invoking the function passed in several times. Thus, in case MultithreadInfo::n_threads()>1, the user code must make sure that the function, typically a lambda, does not write into data shared with other threads.
        The cache is invalidated upon the signal Triangulation::Signals::any_change of the underlying triangulation.
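        A sketch of this initialize() overload for a degree-1 cache, where the support points are exactly the $2^\text{dim}$ vertices in the required hierarchical order; the constant shift is a toy deformation:

        #include <deal.II/base/geometry_info.h>
        #include <deal.II/fe/mapping_q_cache.h>
        #include <deal.II/grid/grid_generator.h>
        #include <deal.II/grid/tria.h>
        #include <vector>

        using namespace dealii;

        int main()
        {
          constexpr int dim = 2;

          Triangulation<dim> tria;
          GridGenerator::hyper_cube(tria);
          tria.refine_global(2);

          MappingQCache<dim> cache(1);
          cache.initialize(
            tria,
            [](const typename Triangulation<dim>::cell_iterator &cell) {
              std::vector<Point<dim>> points(GeometryInfo<dim>::vertices_per_cell);
              for (unsigned int v = 0; v < GeometryInfo<dim>::vertices_per_cell; ++v)
                {
                  points[v] = cell->vertex(v);
                  points[v][0] += 0.1; // rigid shift in x
                }
              return points;
            });
        }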
        @@ -1026,37 +1026,37 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})\hat{\mathbf  u}(\hat{\mathbf
 x}).
\]

          In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

        • mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}\hat{\mathbf u}(\hat{\mathbf  x}).
\]

          Gradients of scalar differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})^{-T}\hat{\mathbf
 u}(\hat{\mathbf  x})
\]

          because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix.

        • mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

        \[
  \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf  u}(\hat{\mathbf x}).
\]

        @@ -1110,21 +1110,21 @@
      Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
\]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
\]

        @@ -1181,35 +1181,35 @@

      Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf  u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf  u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

        \[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      @@ -1268,21 +1268,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        \[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
 J_{jJ}^{\dagger} J_{kK}^{\dagger},
\]

        where

        \[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
\]

      Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

      In the case when dim=spacedim the previous formula reduces to

      \[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -1334,40 +1334,40 @@
      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf  u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
\]

      • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
\]

      • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html	2024-01-30 03:04:41.680791670 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html	2024-01-30 03:04:41.680791670 +0000
@@ -813,37 +813,37 @@
      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})\hat{\mathbf  u}(\hat{\mathbf
 x}).
\]

        In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

      • mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}\hat{\mathbf u}(\hat{\mathbf  x}).
\]

        Gradients of scalar differentiable functions are transformed this way.

        In the case when dim=spacedim the previous formula reduces to

        \[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})^{-T}\hat{\mathbf
 u}(\hat{\mathbf  x})
\]

        because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix.

      • mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

        \[
  \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf  u}(\hat{\mathbf x}).
\]

      @@ -897,21 +897,21 @@
      Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
\]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          \[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
\]

        @@ -968,35 +968,35 @@

      Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf  u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf  u}$ so that

        \[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

        \[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
\]

      @@ -1055,21 +1055,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        \[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
 J_{jJ}^{\dagger} J_{kK}^{\dagger},
\]

        where

        \[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
\]

      Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

      In the case when dim=spacedim the previous formula reduces to

      \[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -1121,40 +1121,40 @@
      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf  u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
\]

      • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
\]

      • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

        \[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html	2024-01-30 03:04:41.704791869 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html	2024-01-30 03:04:41.704791869 +0000
@@ -368,7 +368,7 @@

        Number of shape functions. If this is a Q1 mapping, then it is simply the number of vertices per cell. However, since also derived classes use this class (e.g. the Mapping_Q() class), the number of shape functions may also be different.

        In general, it is $(p+1)^\text{dim}$, where $p$ is the polynomial degree of the mapping.

        Definition at line 382 of file mapping_q.h.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classMatrixFreeOperators_1_1LaplaceOperator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMatrixFreeOperators_1_1LaplaceOperator.html 2024-01-30 03:04:41.760792336 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMatrixFreeOperators_1_1LaplaceOperator.html 2024-01-30 03:04:41.760792336 +0000 @@ -231,8 +231,8 @@

        Detailed Description

        template<int dim, int fe_degree, int n_q_points_1d = fe_degree + 1, int n_components = 1, typename VectorType = LinearAlgebra::distributed::Vector<double>, typename VectorizedArrayType = VectorizedArray<typename VectorType::value_type>>
        class MatrixFreeOperators::LaplaceOperator< dim, fe_degree, n_q_points_1d, n_components, VectorType, VectorizedArrayType >

        This class implements the operation of the action of a Laplace matrix, namely $ L_{ij} = \int_\Omega c(\mathbf x) \mathbf \nabla N_i(\mathbf x) \cdot \mathbf \nabla N_j(\mathbf x)\,d \mathbf x$, where $c(\mathbf x)$ is the scalar heterogeneity coefficient.

        Note that this class only supports the non-blocked vector variant of the Base operator because only a single FEEvaluation object is used in the apply function.

        Definition at line 870 of file operators.h.
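
        As a usage illustration (not taken from the package itself): a minimal sketch of wiring this operator to an existing MatrixFree object; `matrix_free`, `dst`, and `src` are assumed to exist, and `dim`/`fe_degree` are compile-time constants.

        #include <deal.II/matrix_free/operators.h>

        using VectorType = LinearAlgebra::distributed::Vector<double>;
        using Laplace    = MatrixFreeOperators::LaplaceOperator<dim,
                                                                fe_degree,
                                                                fe_degree + 1,
                                                                1,
                                                                VectorType>;
        Laplace laplace;
        laplace.initialize(matrix_free); // std::shared_ptr<const MatrixFree<dim, double>>
        laplace.compute_diagonal();      // e.g. for Jacobi/Chebyshev smoothing
        laplace.vmult(dst, src);         // dst = L * src, computed matrix-free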

        /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 2024-01-30 03:04:41.824792869 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 2024-01-30 03:04:41.824792869 +0000 @@ -285,7 +285,7 @@
      Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Definition at line 1774 of file quadrature_generator.cc.

      @@ -315,7 +315,7 @@
      Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Definition at line 1782 of file quadrature_generator.cc.

      @@ -345,8 +345,8 @@
      Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Note
      The normal at the quadrature points will be parallel to $\nabla \psi$.

      Definition at line 1791 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html 2024-01-30 03:04:41.844793037 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html 2024-01-30 03:04:41.844793037 +0000 @@ -277,7 +277,7 @@
      Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

      Definition at line 1668 of file quadrature_generator.cc.

      @@ -307,7 +307,7 @@
      Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

      Definition at line 1677 of file quadrature_generator.cc.

      @@ -337,8 +337,8 @@
      Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

      Note
      The normal at the quadrature points will be parallel to $\nabla \psi$.

      Definition at line 1686 of file quadrature_generator.cc.
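
      For orientation, a hedged sketch of the surrounding call pattern; `q_collection_1d`, `dof_handler`, `level_set`, and `cell` are assumed to be set up elsewhere.

      NonMatching::DiscreteQuadratureGenerator<dim> generator(q_collection_1d,
                                                              dof_handler,
                                                              level_set);
      generator.generate(cell); // classify the cell against the level set

      // Rules over {psi < 0}, {psi > 0}, and {psi = 0} of the cell's box:
      const Quadrature<dim> &inside  = generator.get_inside_quadrature();
      const Quadrature<dim> &outside = generator.get_outside_quadrature();
      const NonMatching::ImmersedSurfaceQuadrature<dim> &surface =
        generator.get_surface_quadrature();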

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 2024-01-30 03:04:41.932793769 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 2024-01-30 03:04:41.932793769 +0000 @@ -506,7 +506,7 @@

      Returns the surface gradient of the shape function with index function_no at the quadrature point with index quadrature_point.

      The surface gradient is defined as the projection of the gradient to the tangent plane of the surface: $ \nabla u - (n \cdot \nabla u) n $, where $n$ is the unit normal to the surface.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients | update_normal_vectors flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 138 of file fe_immersed_values.cc.

      @@ -653,7 +653,7 @@

      If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

      Parameters
      i        Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
      q_point  Number of the quadrature point at which the function is to be evaluated
      @@ -692,7 +692,7 @@

      Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

      Parameters
      i          Number of the shape function $\varphi_i$ to be evaluated.
      q_point    Number of the quadrature point at which the function is to be evaluated.
      component  Vector component to be evaluated.
      @@ -729,7 +729,7 @@

      The same holds for the arguments of this function as for the shape_value() function.

      Parameters
      i        Number of the shape function $\varphi_i$ to be evaluated.
      q_point  Number of the quadrature point at which the function is to be evaluated.
      @@ -917,17 +917,17 @@
      Return the values of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and the related get_function_gradients() function is also used in step-15 along with numerous other tutorial programs.

      If the current cell is not active (i.e., it has children), then the finite element function is, strictly speaking, defined by shape functions that live on these child cells. Rather than evaluating the shape functions on the child cells, with the quadrature points defined on the current cell, this function first interpolates the finite element function to shape functions defined on the current cell, and then evaluates this interpolated function.

      This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. To get values of multi-component elements, there is another get_function_values() below, returning a vector of vectors of results.

      Parameters
      [in]   fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]  values       The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
      Postcondition
      values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -959,7 +959,7 @@

      This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      Postcondition
      values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 2749 of file fe_values.cc.
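
      A short sketch of the call pattern just described, for a scalar element; `fe_values`, `cell`, and `solution` are assumed to be set up elsewhere, with update_values among the UpdateFlags.

      std::vector<double> values(fe_values.n_quadrature_points);

      fe_values.reinit(cell);
      fe_values.get_function_values(solution, values);
      // values[q] now holds u_h(x_q^K) at quadrature point q of cell K.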

      @@ -1115,16 +1115,16 @@
      Return the gradients of a finite element function at the quadrature points of the current cell, face, or subface (selected the last time the reinit() function was called). That is, if the first argument fe_function is a vector of nodal values of a finite element function $u_h(\mathbf x)$ defined on a DoFHandler object, then the output vector (the second argument, values) is the vector of values $\nabla u_h(\mathbf x_q^K)$ where $x_q^K$ are the quadrature points on the current cell $K$. This function is first discussed in the Results section of step-4, and it is also used in step-15 along with numerous other tutorial programs.

      This function may only be used if the finite element in use is a scalar one, i.e. has only one vector component. There is a corresponding function of the same name for vector-valued finite elements.

      Parameters
      [in]   fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]  gradients    The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      Postcondition
      gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1156,7 +1156,7 @@

      This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      Postcondition
      gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 2932 of file fe_values.cc.
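
      The gradient variant follows the same pattern; as above, `fe_values` and `solution` are assumed, with update_gradients requested.

      std::vector<Tensor<1, dim>> gradients(fe_values.n_quadrature_points);

      fe_values.get_function_gradients(solution, gradients);
      // gradients[q][d] is the derivative in coordinate direction d at point q.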

      @@ -1263,11 +1263,11 @@
      Parameters
      [in]   fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]  hessians     The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      Postcondition
      hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1304,7 +1304,7 @@

      This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      Postcondition
      hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 3041 of file fe_values.cc.
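
      A sketch of the scalar case, also showing the trace identity that relates Hessians to the Laplacians returned by get_function_laplacians() below; `fe_values` and `solution` as before, with update_hessians requested.

      std::vector<Tensor<2, dim>> hessians(fe_values.n_quadrature_points);

      fe_values.get_function_hessians(solution, hessians);
      for (unsigned int q = 0; q < hessians.size(); ++q)
        {
          const double laplacian = trace(hessians[q]); // = laplacians[q]
        }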

      @@ -1411,11 +1411,11 @@
      Parameters
      [in]   fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]  laplacians   The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
      Postcondition
      laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      @@ -1449,7 +1449,7 @@

      This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      Postcondition
      laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1596,11 +1596,11 @@
      Parameters
      [in]   fe_function        A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]  third_derivatives  The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      Postcondition
      third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1637,7 +1637,7 @@

      This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      Postcondition
      third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 3268 of file fe_values.cc.

      @@ -1956,7 +1956,7 @@

      Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

      For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

      You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
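
      For illustration, the classic quadrature loop in which JxW() plays the role of the measure $dx$; assumes `fe_values` was constructed with update_JxW_values.

      double cell_measure = 0.;
      for (const unsigned int q : fe_values.quadrature_point_indices())
        cell_measure += fe_values.JxW(q); // sum of mapped weights = |K|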
      @@ -2009,7 +2009,7 @@
    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
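
    A one-line access sketch (assuming update_jacobians was requested and `q` is a valid quadrature-point index):

    // J_{ij} = dx_i / d(xhat_j) at quadrature point q:
    const DerivativeForm<1, dim, spacedim> &J = fe_values.jacobian(q);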
    @@ -2063,7 +2063,7 @@
    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2117,7 +2117,7 @@
    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 2024-01-30 03:04:41.968794069 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 2024-01-30 03:04:41.968794069 +0000 @@ -160,11 +160,11 @@

    Detailed Description

    template<int dim>
    class NonMatching::FEInterfaceValues< dim >

    This class is intended to facilitate assembling interface terms on faces in immersed (in the sense of cut) finite element methods. These types of terms occur mainly in cut discontinuous Galerkin methods. This class works analogously to NonMatching::FEValues. The domain is assumed to be described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$, and this class assumes that we want to integrate over two different regions of each face, $F$:

    \[
 N = \{x \in F : \psi(x) < 0 \}, \\
 P = \{x \in F : \psi(x) > 0 \},
    \]

    which we as before refer to as the "inside" and "outside" regions of the face.

    @@ -198,7 +198,7 @@
    void reinit(const CellIteratorType &cell, const unsigned int face_no, const unsigned int sub_face_no, const CellNeighborIteratorType &cell_neighbor, const unsigned int face_no_neighbor, const unsigned int sub_face_no_neighbor)
    To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. The immersed quadrature rules are only generated if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEInterfaceValues object created with a quadrature over the reference cell: $[0, 1]^{dim-1}$.

    Definition at line 437 of file fe_values.h.

    Member Typedef Documentation

    @@ -352,7 +352,7 @@ @@ -454,7 +454,7 @@
    Parameters
    mapping_collection   Collection of Mappings to be used.
    fe_collection        Collection of FiniteElements to be used.
    q_collection         Collection of Quadrature rules over $[0, 1]^{dim-1}$ that should be used when a face is not intersected and we do not need to generate immersed quadrature rules.
    q_collection_1d      Collection of 1-dimensional quadrature rules used to generate the immersed quadrature rules. See the QuadratureGenerator class.
    mesh_classifier      Object used to determine when the immersed quadrature rules need to be generated.
    region_update_flags  Struct storing UpdateFlags for the inside/outside region of the cell.

    Return an FEInterfaceValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the outside domain, the returned optional will not contain a value.

    Definition at line 461 of file fe_values.cc.

    @@ -477,7 +477,7 @@
    Return an FEInterfaceValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the inside domain, the returned optional will not contain a value.

    Definition at line 473 of file fe_values.cc.

    @@ -508,7 +508,7 @@
    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim-1}$. These will be used on the non-intersected cells.

    Definition at line 332 of file fe_values.cc.

    @@ -762,7 +762,7 @@
    For each element in the FECollection passed to the constructor, this object contains an FEInterfaceValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim-1}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is INSIDE (and we do not need to generate an immersed quadrature), we return the FEInterfaceValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEInterfaceValues class that does not have a copy-constructor.

    @@ -791,7 +791,7 @@
    For each element in the FECollection passed to the constructor, this object contains an FEInterfaceValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim-1}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is OUTSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEInterfaceValues class that does not have a copy-constructor.

    @@ -820,7 +820,7 @@
    FEInterfaceValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 668 of file fe_values.h.

    @@ -847,7 +847,7 @@
    FEInterfaceValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 677 of file fe_values.h.
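
    By analogy with the pattern documented for NonMatching::FEValues below, a hedged sketch of consuming the optionals this class returns; the reinit() arguments are assumed to come from a surrounding face loop, and `nm_fe_iv` is an illustrative name for an object of this class.

    nm_fe_iv.reinit(cell, face_no, sub_face_no,
                    cell_neighbor, face_no_neighbor, sub_face_no_neighbor);

    const std_cxx17::optional<FEInterfaceValues<dim>> &inside =
      nm_fe_iv.get_inside_fe_values();

    if (inside) // empty, e.g., if the face lies entirely in the outside region
      {
        // ... assemble interface terms using *inside ...
      }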

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 2024-01-30 03:04:41.996794302 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 2024-01-30 03:04:41.996794302 +0000 @@ -159,17 +159,17 @@

    Detailed Description

    template<int dim>
    class NonMatching::FEValues< dim >

    This class is intended to facilitate assembling in immersed (in the sense of cut) finite element methods when the domain is described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$. In this type of method, we typically need to integrate over 3 different regions of each cell, $K$:

    \[
 N = \{x \in K : \psi(x) < 0 \}, \\
 P = \{x \in K : \psi(x) > 0 \}, \\
 S = \{x \in K : \psi(x) = 0 \}.
    \]

    Thus we need quadrature rules for these 3 regions:

    As in the QuadratureGenerator class, we refer to $N$, $P$, and $S$ as the inside, outside, and surface regions. The constructor of this class takes a discrete level set function described by a DoFHandler and a Vector. When the reinit() function is called, the QuadratureGenerator will be called in the background to create these immersed quadrature rules. This class then creates FEValues objects for the inside/outside regions and an FEImmersedSurfaceValues object for the surface region. These objects can then be accessed through one of the functions: get_inside_fe_values(), get_outside_fe_values(), or get_surface_fe_values(). Since a cut between a cell and the domain can be arbitrarily small, the underlying algorithm may generate a quadrature rule with 0 points. This can, for example, happen if the relative size of the cut is similar to the floating-point accuracy. Since the FEValues-like objects are not allowed to contain 0 points, the object that get_inside/outside/surface_fe_values() returns is wrapped in a std_cxx17::optional. This requires us to check if the returned FEValues-like object contains a value before we use it:

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        non_matching_fe_values.reinit(cell);

        const std_cxx17::optional<FEValues<dim>> &fe_values_inside =
          non_matching_fe_values.get_inside_fe_values();

        if (fe_values_inside)
          {
            // ... assemble on the inside region using *fe_values_inside ...
          }
      }
    Of course, it is somewhat expensive to generate the immersed quadrature rules and create FEValues objects with the generated quadratures. To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. It only generates the immersed quadrature rules if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEValues object created with a quadrature over the reference cell: $[0, 1]^{dim}$.

    Definition at line 144 of file fe_values.h.

    Member Typedef Documentation

    @@ -341,7 +341,7 @@ @@ -398,7 +398,7 @@
    Parameters
    mapping_collection   Collection of Mappings to be used.
    fe_collection        Collection of FiniteElements to be used.
    q_collection         Collection of Quadrature rules over $[0, 1]^{dim}$ that should be used when a cell is not intersected and we do not need to generate immersed quadrature rules.
    q_collection_1d      Collection of 1-dimensional quadrature rules used to generate the immersed quadrature rules. See the QuadratureGenerator class.
    mesh_classifier      Object used to determine when the immersed quadrature rules need to be generated.
    region_update_flags  Struct storing UpdateFlags for the inside/outside/surface region of the cell.

    Return an FEValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the outside domain, the returned optional will not contain a value.

    Definition at line 241 of file fe_values.cc.

    @@ -421,7 +421,7 @@
    Return an FEValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the inside domain, the returned optional will not contain a value.

    Definition at line 253 of file fe_values.cc.

    @@ -444,7 +444,7 @@
    Return an FEValues object reinitialized with a quadrature for the surface region of the cell: $\{x \in K : \psi(x) = 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is not intersected, the returned optional will not contain a value.

    Definition at line 265 of file fe_values.cc.
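
    A sketch of the surface counterpart, e.g. for weak boundary terms on the immersed surface; `non_matching_fe_values` is assumed to have been reinitialized on an intersected cell.

    const std_cxx17::optional<NonMatching::FEImmersedSurfaceValues<dim>>
      &surface_fe_values = non_matching_fe_values.get_surface_fe_values();

    if (surface_fe_values)
      for (const unsigned int q :
           surface_fe_values->quadrature_point_indices())
        {
          // The normal here is parallel to grad(psi) at the point:
          const Tensor<1, dim> n = surface_fe_values->normal_vector(q);
          // ... use shape data and surface_fe_values->JxW(q) ...
        }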

    @@ -475,7 +475,7 @@
    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim}$. These will be used on the non-intersected cells.

    Definition at line 105 of file fe_values.cc.

    @@ -692,7 +692,7 @@
    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is INSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEValues class that does not have a copy-constructor.

    @@ -721,7 +721,7 @@
    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is OUTSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEValues class that does not have a copy-constructor.

    @@ -750,7 +750,7 @@
    FEValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 345 of file fe_values.h.

    @@ -777,7 +777,7 @@
    FEValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 354 of file fe_values.h.

    @@ -804,7 +804,7 @@
    FEImmersedSurfaceValues object created with a quadrature rule integrating over the surface region, $\{x \in B : \psi(x) = 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 364 of file fe_values.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 2024-01-30 03:04:42.016794469 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 2024-01-30 03:04:42.020794502 +0000 @@ -133,16 +133,16 @@

    Detailed Description

    template<int dim>
    class NonMatching::FaceQuadratureGenerator< dim >

    This class creates immersed quadrature rules over a face, $F$, of a BoundingBox, when the domain is described by a level set function, $\psi$.

    In the same way as in the QuadratureGenerator class, this class generates quadrature rules to integrate over 3 different regions of the face, $F \subset \mathbb{R}^{dim}$:

    \[
 N = \{x \in F : \psi(x) < 0 \}, \\
 P = \{x \in F : \psi(x) > 0 \}, \\
 S = \{x \in F : \psi(x) = 0 \},
    \]

    which are again referred to as the "inside", $N$, "outside", $P$, and "surface" region, $S$. These types of quadrature rules are in general needed in immersed discontinuous Galerkin methods.

    Under the hood, this class uses the QuadratureGenerator class to build these rules. This is done by restricting the dim-dimensional level set function to the face, thus creating a (dim-1)-dimensional level set function, $\phi$. It then creates the (dim-1)-dimensional quadratures by calling QuadratureGenerator with $\phi$. This means that what holds for the QuadratureGenerator class in general also holds for this class. In particular, if the 1d-quadrature that is used as base contains $n$ points, the number of points will be proportional to $n^{dim-1}$ in the inside/outside quadratures and to $n^{dim-2}$ in the surface quadrature.

    Definition at line 292 of file quadrature_generator.h.
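A minimal sketch of how this class can be used, assuming the constructor and the getters documented on this page (the circle level set, box, and face index are illustrative):

#include <deal.II/base/bounding_box.h>
#include <deal.II/base/function_signed_distance.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/hp/q_collection.h>
#include <deal.II/non_matching/quadrature_generator.h>

#include <utility>

int main()
{
  constexpr int dim = 2;

  // Level set of a circle of radius 0.5 centered at the origin.
  const dealii::Functions::SignedDistance::Sphere<dim> level_set(
    dealii::Point<dim>(), 0.5);

  const dealii::BoundingBox<dim> box(
    std::make_pair(dealii::Point<dim>(0, 0), dealii::Point<dim>(1, 1)));

  dealii::NonMatching::FaceQuadratureGenerator<dim> face_generator(
    dealii::hp::QCollection<1>(dealii::QGauss<1>(2)));

  const unsigned int face_index = 0; // the face x = 0, cut by the circle
  face_generator.generate(level_set, box, face_index);

  // (dim-1)-dimensional rules for the inside, outside and surface regions.
  const auto &inside  = face_generator.get_inside_quadrature();
  const auto &outside = face_generator.get_outside_quadrature();
  const auto &surface = face_generator.get_surface_quadrature();
  (void)inside;
  (void)outside;
  (void)surface;
}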

    Member Typedef Documentation

    @@ -239,7 +239,7 @@
Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 1774 of file quadrature_generator.cc.

    @@ -261,7 +261,7 @@
Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 1782 of file quadrature_generator.cc.

    @@ -283,8 +283,8 @@
Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

Note
The normal at the quadrature points will be parallel to $\nabla \psi$.

    Definition at line 1791 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 2024-01-30 03:04:42.036794636 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 2024-01-30 03:04:42.036794636 +0000 @@ -220,7 +220,7 @@

Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 1853 of file quadrature_generator.cc.

    @@ -242,7 +242,7 @@

Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 1860 of file quadrature_generator.cc.

    @@ -262,7 +262,7 @@
Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$, where $F$ is the face of the BoundingBox passed to generate().

    Note
    In 1d, this quadrature always contains 0 points.

    Definition at line 1868 of file quadrature_generator.cc.

    @@ -309,7 +309,7 @@
Quadrature for the region $\{x \in F : \psi(x) < 0 \}$. Created in the last call to generate().

    Definition at line 455 of file quadrature_generator.h.

    @@ -334,7 +334,7 @@
Quadrature for the region $\{x \in F : \psi(x) > 0 \}$. Created in the last call to generate().

    Definition at line 462 of file quadrature_generator.h.

    @@ -359,7 +359,7 @@
Quadrature for the region $\{x \in F : \psi(x) = 0 \}$. This quadrature always contains zero points in 1d.

    Definition at line 469 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 2024-01-30 03:04:42.068794902 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 2024-01-30 03:04:42.068794902 +0000 @@ -207,41 +207,41 @@

    Detailed Description

    template<int dim, int spacedim = dim>
class NonMatching::ImmersedSurfaceQuadrature< dim, spacedim >

This class defines a quadrature formula to integrate over the intersection between an oriented surface, $\hat{S}$, and a cell or face. The word "immersed" in the class name reflects that the surface may intersect the cell/face in an arbitrary way.

The spacedim template parameter of this class is the dimension that the (spacedim-1)-dimensional surface is embedded in: $\hat{S} \subset \mathbb{R}^{\text{spacedim}}$. The dim parameter describes the dimension of the "object" that the surface intersects. That is, dim = spacedim corresponds to the surface intersecting a cell and dim = spacedim - 1 corresponds to the surface intersecting a face. The quadrature formula is described by a set of quadrature points, $\hat{x}_q \in \mathbb{R}^{\text{dim}}$, weights, $w_q$, and normalized surface normals, $\hat{n}_q \in \mathbb{R}^{\text{spacedim}}$.

Consider first the case dim = spacedim. We typically want to compute integrals in real space. A surface, $S$, intersecting a cell, $K$, in real space can be mapped onto a surface, $\hat{S}$, intersecting the unit cell, $\hat{K}$. Thus an integral over $S\cap K$ in real space can be transformed to an integral over $\hat{S} \cap \hat{K}$ according to

\[
 \int_{S\cap K} f dS =
 \int_{S\cap K} f |d\bar{S}| =
 \int_{\hat{S}\cap\hat{K}} f \circ F_{K} \det(J) |\left( J^{-1} \right)^T d\hat{S}|,
\]

where $F_K$ is the mapping from reference to real space and $J$ is its Jacobian matrix. This transformation is possible since the continuous surface elements are vectors: $d\bar{S}, d\hat{S} \in \mathbb{R}^{spacedim}$, which are parallel to the normals of $S$ and $\hat{S}$. That is, the normal is needed to do the transformation. Thus, in addition to storing points and weights, this quadrature also stores the normalized normal for each quadrature point. This can be viewed as storing a discrete surface element,

\[
 \Delta \hat{S}_q \dealcoloneq w_q \hat{n}_q \approx d\hat{S}(\hat{x}_q),
\]

for each quadrature point. The surface integral in real space would then be approximated as

\[
 \int_{S\cap K} f dS \approx
 \sum_{q} f \left(F_{K}(\hat{x}_{q}) \right) \det(J_q)
 |\left( J_q^{-1} \right)^T \hat{n}_q| w_q.
\]

When dim = spacedim - 1, this class represents a (spacedim-2)-dimensional integral. That is, if spacedim = 3 we have a line integral immersed in a face. Let $\hat{r}(t)$, $t \in [0,T]$ be an arc-length parameterization of $\hat{F}\cap \hat{S}$, i.e., the part of the surface that intersects the face in reference space. This means that $\bar{r}(t) = F_K(\hat{r}(t))$ is a parameterization of $S\cap F$. The transformation of the line integral now reads

\[
 \int_{S\cap F} f dr
 = \int_{0}^T f(\bar{r}(t)) \left \|\frac{d\bar{r}}{dt} \right \| dt
 = \int_{0}^T f(F_K(\hat{r}(t))) \left \| J \frac{d\hat{r}}{dt} \right \| dt
 \approx \sum_{q} f \left(F_{K}(\hat{x}_{q}) \right) \|J(\hat{x}_q)
 \hat{t}_q \| w_q,
\]

where $\hat{t}_q = \frac{d\hat{r}}{dt}(x_q) $ is the tangent to the curve at $\hat{x}_q$. This tangent can also be computed as $t_q = \hat{n}_q \times \hat{n}_F / \| \hat{n}_q \times \hat{n}_F \|$ where $\hat{n}_F$ is the face normal. It would be possible to compute the tangent by only knowing the normal to the curve in the face plane (i.e. the dim-dimensional normal). However, when these quadratures are used, the weak form typically involves the so-called conormal, which cannot be computed without knowing the surface normal in $\mathbb{R}^{\text{spacedim}}$. The conormal is the unit vector parallel to the projection of the face normal into the surface plane. This is the same as the normalized boundary form.

    Definition at line 107 of file immersed_surface_quadrature.h.
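As a small sketch of how the stored data fits the formulas above (assuming only the point()/weight()/normal_vector() accessors), one can accumulate the discrete surface elements $\Delta \hat{S}_q = w_q \hat{n}_q$:

#include <deal.II/base/tensor.h>
#include <deal.II/non_matching/immersed_surface_quadrature.h>

// Sum of the discrete surface elements w_q * n_q of the quadrature.
template <int dim>
dealii::Tensor<1, dim> total_discrete_surface_element(
  const dealii::NonMatching::ImmersedSurfaceQuadrature<dim> &quadrature)
{
  dealii::Tensor<1, dim> total;
  for (unsigned int q = 0; q < quadrature.size(); ++q)
    total += quadrature.weight(q) * quadrature.normal_vector(q);
  return total;
}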

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 2024-01-30 03:04:42.092795102 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 2024-01-30 03:04:42.092795102 +0000 @@ -131,24 +131,24 @@

    Detailed Description

    template<int dim>
class NonMatching::QuadratureGenerator< dim >

This class creates immersed quadrature rules over a BoundingBox, $B \subset \mathbb{R}^{dim}$, when the domain is described by a level set function, $\psi$.

This class creates quadrature rules for the intersections between the box and the three different regions defined by the level set function. That is, it creates quadrature rules to integrate over the following regions:

\[
 N = \{x \in B : \psi(x) < 0 \}, \\
 P = \{x \in B : \psi(x) > 0 \}, \\
 S = \{x \in B : \psi(x) = 0 \}.
\]

When working with level set functions, the most common approach is to describe a domain, $\Omega$, as

\[
 \Omega = \{ x \in \mathbb{R}^{dim} : \psi(x) < 0 \}.
\]

Given this, we shall use the naming convention that $N$ is the "inside" region (i.e. inside $\Omega$), $P$ is the "outside" region and $S$ is the "surface" region. The "inside" and "outside" quadratures will also be referred to as the "bulk"-quadratures.

The underlying algorithm uses a 1-dimensional quadrature rule as a base for creating the immersed quadrature rules. Gauss-Legendre quadrature (QGauss) is recommended. The constructor takes an hp::QCollection<1>. One can select which 1d-quadrature in the collection should be used through the set_1d_quadrature() function. The number of quadrature points in the constructed quadratures will vary depending on the level set function. More quadrature points will be created if the intersection is "bad", for example, if the zero-contour has a high curvature compared to the size of the box. However, if the number of points in the 1d quadrature is $n$, the number of points will be proportional to $n^{dim}$ in the bulk quadratures and to $n^{dim-1}$ in the surface quadrature. For example, in the 2d-example in the above figure, there are 2 points in the 1d-quadrature. If the 1d-quadrature is a Gauss-Legendre quadrature and the grid has size $h$, the immersed quadratures typically give global errors proportional to $h^{2n}$, both for the bulk and surface integrals. If the 1d-quadrature has positive weights, the weights of the immersed quadratures will also be positive.

A detailed description of the underlying algorithm can be found in "High-Order Quadrature Methods for Implicitly Defined Surfaces and Volumes in Hyperrectangles", R. I. Saye, SIAM J. Sci. Comput., 37(2), http://www.dx.doi.org/10.1137/140966290

@@ -244,7 +244,7 @@

Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

    Definition at line 1668 of file quadrature_generator.cc.
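A minimal sketch putting the pieces above together (the level set and box are illustrative):

#include <deal.II/base/bounding_box.h>
#include <deal.II/base/function_signed_distance.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/hp/q_collection.h>
#include <deal.II/non_matching/quadrature_generator.h>

#include <iostream>
#include <utility>

int main()
{
  constexpr int dim = 2;

  // Level set of a circle immersed in the unit box: psi < 0 inside the circle.
  const dealii::Functions::SignedDistance::Sphere<dim> level_set(
    dealii::Point<dim>(0.5, 0.5), 0.25);

  const dealii::BoundingBox<dim> box(
    std::make_pair(dealii::Point<dim>(0, 0), dealii::Point<dim>(1, 1)));

  dealii::NonMatching::QuadratureGenerator<dim> generator(
    dealii::hp::QCollection<1>(dealii::QGauss<1>(2)));
  generator.generate(level_set, box);

  std::cout << "inside:  " << generator.get_inside_quadrature().size() << '\n'
            << "outside: " << generator.get_outside_quadrature().size() << '\n'
            << "surface: " << generator.get_surface_quadrature().size() << '\n';
}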

    @@ -266,7 +266,7 @@
Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

    Definition at line 1677 of file quadrature_generator.cc.

    @@ -288,8 +288,8 @@
Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

Note
The normal at the quadrature points will be parallel to $\nabla \psi$.

    Definition at line 1686 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 2024-01-30 03:04:42.132795436 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 2024-01-30 03:04:42.132795436 +0000 @@ -230,10 +230,10 @@
    template<int dim, class VectorType = Vector<double>>
    class NonMatching::internal::DiscreteQuadratureGeneratorImplementation::RefSpaceFEFieldFunction< dim, VectorType >

    This class evaluates a function defined by a solution vector and a DoFHandler transformed to reference space. To be precise, if we let $\hat{x}$ be a point on the reference cell, this class implements the function

    $\hat{f}(\hat{x}) = \sum_{j=0}^{n-1} f_j \hat{\phi}_j(\hat{x})$,

where $f_j$ are the local solution values and $\hat{\phi}_j(\hat{x})$ are the local reference space shape functions. The gradient and Hessian of this function are thus derivatives with respect to the reference space coordinates, $\hat{x}_0, \hat{x}_1, \ldots$.

    Note that this class is similar to FEFieldFunction, but that FEFieldFunction implements the following function on a given cell, $K$,

    $f(x) = \sum_{j=0}^{n-1} f_j \hat{\phi}_j(F_K^{-1}(x))$,

which has the same coefficients but uses real space basis functions. Here, $F_K$ is the mapping from the reference cell to the real cell.

    Before calling the value/gradient/hessian function, the set_active_cell function must be called to specify which cell the function should be evaluated on.

    Definition at line 1312 of file quadrature_generator.cc.
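The formula itself is easy to illustrate outside of this internal class; a hedged sketch evaluating $\hat{f}(\hat{x}) = \sum_{j} f_j \hat{\phi}_j(\hat{x})$ for a scalar finite element (the function name is ours, not part of the library):

#include <deal.II/base/point.h>
#include <deal.II/fe/fe.h>

#include <vector>

template <int dim>
double evaluate_in_reference_space(
  const dealii::FiniteElement<dim> &fe,
  const std::vector<double>        &local_dof_values, // the f_j
  const dealii::Point<dim>         &x_hat)            // point on the reference cell
{
  double value = 0;
  for (unsigned int j = 0; j < fe.n_dofs_per_cell(); ++j)
    value += local_dof_values[j] * fe.shape_value(j, x_hat);
  return value;
}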

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 2024-01-30 03:04:42.160795669 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 2024-01-30 03:04:42.160795669 +0000 @@ -150,20 +150,20 @@

    Detailed Description

    template<int dim, int spacedim>
    class NonMatching::internal::QuadratureGeneratorImplementation::QGenerator< dim, spacedim >

    This class implements the Saye-algorithm cited in the documentation of the QuadratureGenerator class.

The generate function takes a number of $dim$-dimensional level set functions, $\psi_i$, and a BoundingBox<dim>, and builds a partitioning of quadratures, as defined in the documentation of the QPartitioning class. That is, this class builds an object of type QPartitioning<dim>.

If all $\psi_i$ passed to generate can be determined to be positive or negative definite, the QPartitioning will consist of a single quadrature forming a tensor product.

If this is not the case, the algorithm uses recursion over the spatial dimension. The spacedim template parameter denotes the dimension we started with and dim denotes on what level we are in the recursion. That is, we first construct a QPartitioning<dim - 1> and then build the higher dimensional quadratures from these. What we in the end actually want is a spacedim-dimensional partitioning of quadratures, for a single level set function, $\psi$.

The algorithm is based on the implicit function theorem. Starting with a single level set function, $\psi$, we try to find a direction $i$, such that

$|\frac{\partial \psi}{\partial x_i}| > 0$.

    throughout the whole box. This means that the zero-contour of the level set function can be parameterized by an implicit function

$H = H(x_0, ..., x_{i-1}, x_{i+1}, ..., x_{dim-1})$,

    so that

$\psi(..., x_{i-1}, H(..., x_{i-1}, x_{i+1}, ...), x_{i+1}, ...) = 0$,

over a subset, $I \subset C \subset \mathbb{R}^{dim-1}$, of the cross section, $C$, of the box (see BoundingBox::cross_section). Here, $I$ is the "indefinite"-region defined in the QPartitioning class. To follow the convention in the original paper, we will refer to $H$ as the "height-function" and to $i$ as the "height-function direction".

If a height function direction can be found, we go down in dimension by creating two new level set functions, $\{\psi_0, \psi_1\}$, which are the restriction of $\psi$ to the top and bottom faces of the box (in the height function direction). We then delegate to QGenerator<dim-1, spacedim> to create a QPartitioning<dim-1> over the cross section.
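As a concrete illustration (an example added here, not part of the class documentation): for the level set $\psi(x, y) = x^2 + y^2 - 1$ on a box lying strictly above the $x$-axis, the direction $i = 1$ works, since $|\frac{\partial \psi}{\partial y}| = 2|y| > 0$ throughout the box. The zero contour is then the graph of the height function

\[
 H(x) = \sqrt{1 - x^2},
\]

which satisfies $\psi(x, H(x)) = 0$, and the restrictions of $\psi$ to the top and bottom faces of the box are the two lower dimensional level set functions used in the next step of the recursion.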

When we reach the base case, $dim = 1$, the creation of QPartitioning<1> is simple. See the documentation of the specialized class QGenerator<1, spacedim>.

    As we go up through the dimensions and create the higher dimensional quadratures, we need to know the function value of the height functions at the lower dimensional quadrature points. Since the functions are implicit, we need to do root-finding on the level set functions to find the function values. For this we use the class UpThroughDimensionCreator, see documentation there.

When we have $n$ level set functions (i.e. after having gone down in dimension), we try to find a height function direction which works for all those $\psi_i$ that are intersected by the zero contour (i.e. those not positive or negative definite). If such a direction exists, we will have a maximum of $n$ associated implicit height functions, $H_j$. Each $H_j$ parametrizes the $x_i$-coordinate of the zero-contour over a region, $I_j$. The indefinite region in the lower dimensional partitioning is the union of these: $I = \cup_j I_j$.

As we try to find a height function direction, we estimate bounds on the gradient components by approximating each component as a 1st-order Taylor-polynomial. If a direction cannot be found, the box is split and we recurse on each smaller box. This makes an implicit function more likely to exist since we seek it over a smaller portion of the zero contour. It also makes the estimated bounds tighter since we extrapolate the Taylor-polynomial a shorter distance.

Since we cannot split a box forever, there is a maximum number of allowed splits in the additional data struct passed to the constructor. If this is reached, the algorithm uses the midpoint method as a last resort.

    @@ -313,7 +313,7 @@
Gets the $(dim - 1)$-dimensional quadratures from the lower dimensional algorithm and creates the $dim$-dimensional quadrature rules over the box from the lower dimensional ones.

    Definition at line 1118 of file quadrature_generator.cc.

    @@ -499,7 +499,7 @@
Object responsible for creating the $dim$-dimensional quadratures from

    Definition at line 1182 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 2024-01-30 03:04:42.184795869 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 2024-01-30 03:04:42.184795869 +0000 @@ -151,8 +151,8 @@

    Detailed Description

    template<int spacedim>
    class NonMatching::internal::QuadratureGeneratorImplementation::QGenerator< 1, spacedim >

    The 1d-base case of the recursive algorithm QGenerator<dim, spacedim>.

Let $L$ and $R$ be the left and right bounds of the one-dimensional BoundingBox. This interval is partitioned into $[x_0, x_1, ..., x_n]$ where $x_0 = L$, $x_n = R$, and the remaining $x_i$ are the roots of the level set functions in the interval $[L, R]$. In each interval, $[x_i, x_{i+1}]$, quadrature points are distributed according to a 1d-quadrature rule. These points are added to one of the regions of QPartitioning determined from the signs of the level set functions on the interval (see the documentation of QPartitioning).

If spacedim = 1 the points $[x_1, x_{n-1}]$ are also added as surface quadrature points to QPartitioning::surface.

    Definition at line 1208 of file quadrature_generator.h.
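A small worked example (added for illustration): take $[L, R] = [0, 1]$ and a single level set function $\psi(x) = x - 0.3$, which has the root $x_1 = 0.3$. The partition is $[x_0, x_1, x_2] = [0, 0.3, 1]$. A 1d-quadrature scaled into $[0, 0.3]$, where $\psi < 0$, contributes points to the "negative" region of the QPartitioning, while points scaled into $[0.3, 1]$, where $\psi > 0$, go to the "positive" region. If spacedim = 1, the root $x_1 = 0.3$ is additionally added as a surface quadrature point.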

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 2024-01-30 03:04:42.204796035 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 2024-01-30 03:04:42.204796035 +0000 @@ -120,18 +120,18 @@

    Detailed Description

    template<int dim>
    class NonMatching::internal::QuadratureGeneratorImplementation::QPartitioning< dim >

    Class that stores quadrature rules to integrate over 4 different regions of a single BoundingBox, $B$. Given multiple level set functions,

$\psi_i : \mathbb{R}^{dim} \rightarrow \mathbb{R}$, $i = 0, 1, ...$,

the box, $B \subset \mathbb{R}^{dim}$, is partitioned into a "negative", "positive", and "indefinite" region, $B = N \cup P \cup I$, according to the signs of $\psi_i$ over each region:

\[
 N = \{x \in B : \psi_i(x) < 0, \forall i \}, \\
 P = \{x \in B : \psi_i(x) > 0, \forall i \}, \\
 I = B \setminus (\overline{N} \cup \overline{P}).
\]

Thus, all $\psi_i$ are positive over $P$ and negative over $N$. Over $I$ the level set functions differ in sign. This class holds quadrature rules for each of these regions. In addition, when there is a single level set function, $\psi$, it holds a surface quadrature for the zero contour of $\psi$:

$S = \{x \in B : \psi(x) = 0 \}$.

Note that when there is a single level set function, $I$ is empty and $N$ and $P$ are the regions that one typically integrates over in an immersed finite element method.

    Definition at line 753 of file quadrature_generator.h.

    Member Function Documentation

    @@ -171,7 +171,7 @@
Quadrature for the region $\{x \in B : \psi_i(x) < 0 \forall i \}$ of the box, $B$.

    Definition at line 767 of file quadrature_generator.h.

    @@ -190,7 +190,7 @@
Quadrature for the region $\{x \in B : \psi_i(x) > 0 \forall i \}$ of the box, $B$.

    Definition at line 773 of file quadrature_generator.h.

    @@ -228,7 +228,7 @@
Quadrature for the region $\{x \in B : \psi(x) = 0 \}$ of the box, $B$.

    Definition at line 785 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 2024-01-30 03:04:42.220796169 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 2024-01-30 03:04:42.220796169 +0000 @@ -122,7 +122,7 @@  

    Detailed Description

A class that attempts to find multiple distinct roots of a function, $f(x)$, over an interval, $[l, r]$. This is done as follows. If there is a sign change in function value between the interval end points, we solve for the root. If there is no sign change, we attempt to bound the function value away from zero on $[l, r]$, to conclude that no roots exist. If we can't exclude that there are roots, we split the interval in two: $[l, (r + l) / 2]$, $[(r + l) / 2, r]$, and use the same algorithm recursively on each interval. This means that we can typically find 2 distinct roots, but not 3.

The bounds on the function values are estimated using the function taylor_estimate_function_bounds, which approximates the function as a second order Taylor-polynomial around the interval midpoint. When we have a sign change on an interval, this class uses boost::math::tools::toms748_solve for finding roots.

    Definition at line 608 of file quadrature_generator.h.
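A compact sketch of this recursive strategy (a simplification, not the class's actual interface: plain bisection stands in for boost::math::tools::toms748_solve, and the Taylor-based exclusion test is replaced by a fixed recursion depth):

#include <functional>
#include <vector>

void find_roots(const std::function<double(double)> &f,
                const double                         l,
                const double                         r,
                const unsigned int                   max_depth,
                std::vector<double>                 &roots)
{
  if (f(l) * f(r) < 0) // sign change: there is a root, narrow it down
    {
      double a = l;
      double b = r;
      while (b - a > 1e-12)
        {
          const double m = 0.5 * (a + b);
          (f(a) * f(m) <= 0 ? b : a) = m; // keep the sign-changing half
        }
      roots.push_back(0.5 * (a + b));
    }
  else if (max_depth > 0) // can't exclude roots: split and recurse on both halves
    {
      const double m = 0.5 * (l + r);
      find_roots(f, l, m, max_depth - 1, roots);
      find_roots(f, m, r, max_depth - 1, roots);
    }
  // The real class additionally sorts the roots and removes duplicates.
}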

@@ -168,7 +168,7 @@

For each of the incoming functions, attempt to find the roots over the interval defined by interval and add these to roots. The returned roots will be sorted in ascending order: $x_0 < x_1 < ...$ and duplicate roots (with respect to the tolerance in AdditionalData) will be removed.

    Definition at line 532 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 2024-01-30 03:04:42.240796335 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 2024-01-30 03:04:42.244796368 +0000 @@ -131,13 +131,13 @@

    Detailed Description

    template<int dim, int spacedim>
class NonMatching::internal::QuadratureGeneratorImplementation::UpThroughDimensionCreator< dim, spacedim >

This class is responsible for creating quadrature points for the $dim$-dimensional quadrature partitioning from a $(dim - 1)$-dimensional "indefinite" quadrature (see the QPartitioning documentation).

To be precise, let $[L, R]$ be the extents of the box in the height function direction and let $I \subset \mathbb{R}^{dim-1}$ be the lower dimensional indefinite region. This class will create quadrature points over $I \times [L, R] \subset \mathbb{R}^{dim}$ and, in the case $dim=spacedim$, points for the surface quadrature.

For each lower dimensional quadrature point, $(x_I, w_I)$ in the indefinite quadrature, we create several 1d-level set functions by restricting $\psi_j$ to $x_I$. We then partition the interval $[L, R]$ into $[y_0, y_1, ..., y_n]$, where $y_0 = L$, $y_n = R$, and the remaining $y_i$ are the roots of the 1d-level set functions in $[L, R]$. Since the level set functions change sign between the roots, each interval belongs to a different region in the quadrature partitioning.

In each interval, $[y_i, y_{i+1}]$, we distribute points according to the 1d-base quadrature, $(x_q, w_q)$, and take the Cartesian product with $(x_I, w_I)$ to create the $dim$-dimensional quadrature points, $(X_q, W_q)$: $X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q)$, $W_q = w_I (y_{i+1} - y_i) w_q$.

When $dim=spacedim$, we have a single level set function, $\psi$. Since we have fulfilled the implicit function theorem, there is a single root $y_1 \in [L, R]$. The point, $x_s = x_I \times y_1$, will be added as a point in the surface quadrature. One can show that the correct weight of this point is

$w_s = \frac{\|\nabla \psi(x_s)\|}{|\partial_i \psi(x_s)|} w_I$,

where $i$ is the height function direction.

    Definition at line 828 of file quadrature_generator.h.
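The weight formula is easy to state in code; a minimal illustration (the function and parameter names are ours, not the class interface):

#include <deal.II/base/tensor.h>

#include <cmath>

// w_s = ||grad(psi)(x_s)|| / |d psi / d x_i (x_s)| * w_I,
// with i the height function direction.
template <int dim>
double surface_quadrature_weight(
  const dealii::Tensor<1, dim> &level_set_gradient, // grad(psi)(x_s)
  const unsigned int            height_direction,   // i
  const double                  lower_dim_weight)   // w_I
{
  return level_set_gradient.norm() /
         std::abs(level_set_gradient[height_direction]) * lower_dim_weight;
}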

    Constructor & Destructor Documentation

@@ -197,7 +197,7 @@

Create $dim$-dimensional immersed quadratures from the incoming $(dim-1)$-dimensional quadratures and add these to q_partitioning.

    Definition at line 725 of file quadrature_generator.cc.

    @@ -270,7 +270,7 @@

    Create a surface quadrature point from the lower-dimensional point and add it to surface_quadrature.

This function is only called when $dim=spacedim$ and there is a single level set function. At this point there should only be a single root in the interval $[L, R]$.

    Definition at line 781 of file quadrature_generator.cc.

    @@ -379,7 +379,7 @@
1d-functions that are restrictions of each dim-dimensional level set function passed to generate() to some $(dim-1)$-dimensional point.

    Definition at line 898 of file quadrature_generator.h.

    @@ -433,7 +433,7 @@
The roots of the functions in point_restrictions. These are the values of the height functions, $\{H_i(x_I)\}$, at some lower dimensional quadrature point, $x_I \in \mathbb{R}^{dim-1}$.

    Definition at line 911 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 2024-01-30 03:04:42.268796569 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 2024-01-30 03:04:42.268796569 +0000 @@ -523,10 +523,10 @@

    A function object that users may supply and that is intended to prepare the linear solver for subsequent calls to solve_jacobian_system().

    The job of setup_jacobian() is to prepare the linear solver for subsequent calls to solve_with_jacobian(), in the solution of linear systems $Ax = b$. The exact nature of this system depends on the SolutionStrategy that has been selected.

In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$.

    Parameters
current_u	Current value of $u$
    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 2024-01-30 03:04:42.288796735 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 2024-01-30 03:04:42.288796735 +0000 @@ -350,7 +350,7 @@
Relative $l_2$ tolerance of the residual to be reached.

    Note
    Solver terminates successfully if either the function tolerance or the relative tolerance has been reached.

    Definition at line 186 of file nonlinear.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 2024-01-30 03:04:42.328797069 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 2024-01-30 03:04:42.328797069 +0000 @@ -573,7 +573,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -603,7 +603,7 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    \begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
@@ -619,7 +619,7 @@
                                     -F^{-1}(\mathbf x_1)\right]\right).
 \end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    \begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html	2024-01-30 03:04:42.372797435 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html	2024-01-30 03:04:42.372797435 +0000
@@ -500,7 +500,7 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    Note
If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, you can do so by overloading the project_to_manifold() function.
    Parameters
    @@ -509,7 +509,7 @@
Returns
A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, spacedim >.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2024-01-30 03:04:42.416797802 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2024-01-30 03:04:42.416797802 +0000 @@ -448,7 +448,7 @@
Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the uv coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function.

    Refer to the general documentation of this class for more information.

    @@ -637,7 +637,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -667,7 +667,7 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    \begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
@@ -683,7 +683,7 @@
                                     -F^{-1}(\mathbf x_1)\right]\right).
 \end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    \begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html	2024-01-30 03:04:42.460798168 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html	2024-01-30 03:04:42.460798168 +0000
@@ -494,7 +494,7 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    Note
If you want to use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, you can do so by overloading the project_to_manifold() function.
    Parameters
    @@ -503,7 +503,7 @@
Returns
A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, spacedim >.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 2024-01-30 03:04:42.504798535 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 2024-01-30 03:04:42.504798535 +0000 @@ -494,7 +494,7 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    Note
If you want to use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, you can do so by overloading the project_to_manifold() function.
    Parameters
    @@ -503,7 +503,7 @@
Returns
A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, spacedim >.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 2024-01-30 03:04:42.544798868 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 2024-01-30 03:04:42.544798868 +0000 @@ -273,7 +273,7 @@

    Detailed Description

    template<typename VectorType>
    class PArpackSolver< VectorType >

    Interface for using PARPACK. PARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. Here we interface to the routines pdneupd, pdseupd, pdnaupd, pdsaupd of PARPACK. The package is designed to compute a few eigenvalues and corresponding eigenvectors of a general n by n matrix A. It is most appropriate for large sparse matrices A.

In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    The ArpackSolver can be used in application codes in the following way:

    const unsigned int num_arnoldi_vectors = 2*size_of_spectrum + 2;
    @@ -298,8 +298,8 @@
    const AdditionalData additional_data
for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells PARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector of objects of type V that will contain the eigenvectors computed.

Currently, only three modes of (P)Arpack are implemented. In mode 3 (default), OP is an inverse operation for the matrix A - sigma * B, where sigma is a shift value, set to zero by default. In mode 2, OP is an inverse of the mass matrix B. Finally, mode 1 corresponds to the standard eigenvalue problem without spectral transformation, $Ax=\lambda x$. The mode can be specified via the AdditionalData object. Note that for shift-and-invert (mode=3), the sought eigenpairs are those after the spectral transformation is applied.

    The OP can be specified by using a LinearOperator:

    const double shift = 5.0;
    const auto op_A = linear_operator<vector_t>(A);
    const auto op_B = linear_operator<vector_t>(B);
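A plausible continuation of this snippet (a sketch only, not part of the excerpt above: the CG solver, its tolerances, and the identity preconditioner are assumptions), building the shift-and-invert operator used by mode 3:

    const auto op_shift = op_A - shift * op_B;                 // A - sigma*B
    SolverControl solver_control_lin(1000, 1e-10, false, false);
    SolverCG<vector_t> cg(solver_control_lin);
    const auto op_shift_invert =
      inverse_operator(op_shift, cg, PreconditionIdentity()); // OP = (A - sigma*B)^{-1}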
@@ -631,7 +631,7 @@ const unsigned int n_eigenvalues

Solve the generalized eigenspectrum problem $A x=\lambda B x$ by calling the pd(n/s)eupd and pd(n/s)aupd functions of PARPACK.

    In mode=3, inverse should correspond to $[A-\sigma B]^{-1}$, whereas in mode=2 it should represent $B^{-1}$. For mode=1 both B and inverse are ignored.

    Definition at line 770 of file parpack_solver.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2024-01-30 03:04:42.568799068 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2024-01-30 03:04:42.568799068 +0000 @@ -291,7 +291,7 @@ const MPI_Comm communicator

    Reinitialization that takes the number of locally-owned degrees of freedom local_size and an index set for the required ghost indices ghost_indices.

The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

    The export_to_ghost_array will populate an array containing values from locally-owned AND ghost indices, as for the relevant set of dofs of a usual FEM simulation.

    Definition at line 46 of file petsc_communication_pattern.cc.
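A minimal reinitialization sketch under the description above (all sizes and names are hypothetical; only the call shape follows this page):

    // Hypothetical partition: each process owns local_size dofs and needs two ghosts.
    IndexSet ghost_indices(n_global_dofs);
    ghost_indices.add_range(ghost_begin, ghost_begin + 2);
    PETScWrappers::CommunicationPattern pattern;
    pattern.reinit(local_size, ghost_indices, mpi_communicator);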

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 2024-01-30 03:04:42.624799534 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 2024-01-30 03:04:42.620799502 +0000 @@ -1505,8 +1505,8 @@
Return the l1-norm of the matrix, that is $|M|_1=\max_{\text{all columns }j}\sum_{\text{all rows }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -1534,8 +1534,8 @@
Return the linfty-norm of the matrix, that is $|M|_\infty=\max_{\text{all rows }i}\sum_{\text{all columns }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.
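As a worked check of the two definitions: for $M = \begin{pmatrix} 1 & -2 \\ 3 & 4 \end{pmatrix}$ one gets $|M|_1 = \max(|1|+|3|,\ |-2|+|4|) = 6$ (column sums), while $|M|_\infty = \max(|1|+|-2|,\ |3|+|4|) = 7$ (row sums).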

    @@ -1591,7 +1591,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -1622,7 +1622,7 @@
Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.
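A short usage sketch of the two operations just described (matrix and vector names are hypothetical; M is assumed to be quadratic, e.g. a mass matrix):

    const auto vMv = M.matrix_norm_square(v);       // (v, M v)
    const auto uMv = M.matrix_scalar_product(u, v); // (u, M v)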

    @@ -2081,7 +2081,7 @@
Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 694 of file petsc_matrix_base.cc.

    @@ -2109,7 +2109,7 @@
Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 704 of file petsc_matrix_base.cc.

    @@ -2337,7 +2337,7 @@
Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    @@ -2372,7 +2372,7 @@
Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 2024-01-30 03:04:42.688800068 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 2024-01-30 03:04:42.688800068 +0000 @@ -875,7 +875,7 @@
Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 410 of file petsc_block_sparse_matrix.h.

    @@ -987,7 +987,7 @@
Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 441 of file petsc_block_sparse_matrix.h.
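A usage sketch of the two products above (all objects hypothetical; the vectors must be partitioned compatibly with the block matrix):

    M.vmult(dst, src);      // dst   = M   * src
    M.Tvmult(dst_t, src_t); // dst_t = M^T * src_t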

    @@ -1398,7 +1398,7 @@
Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    @@ -1424,7 +1424,7 @@
Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    @@ -2037,7 +2037,7 @@
Adding matrix-vector multiplication: add $M*src$ to $dst$, with $M$ being this matrix.

    @@ -2142,7 +2142,7 @@
Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -2611,7 +2611,7 @@
Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -2719,7 +2719,7 @@
Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 2024-01-30 03:04:42.752800601 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 2024-01-30 03:04:42.756800634 +0000 @@ -1933,7 +1933,7 @@
Return the square of the $l_2$-norm.

    @@ -1985,7 +1985,7 @@
Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -2011,7 +2011,7 @@
Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -2037,7 +2037,7 @@
Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2024-01-30 03:04:42.824801201 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2024-01-30 03:04:42.824801201 +0000 @@ -814,7 +814,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

@@ -837,7 +837,7 @@ const Vector & v

Compute the matrix scalar product $\left(u^\ast,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Definition at line 815 of file petsc_parallel_sparse_matrix.cc.

@@ -904,7 +904,7 @@ const MPI::Vector & V = MPI::Vector()

Perform the matrix-matrix multiplication $C = AB$, or, $C = A \text{diag}(V) B$ given a compatible vector $V$.

    This function calls MatrixBase::mmult() to do the actual work.

    Definition at line 876 of file petsc_parallel_sparse_matrix.cc.

@@ -931,7 +931,7 @@ const MPI::Vector & V = MPI::Vector()

Perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, $C = A^T \text{diag}(V) B$ given a compatible vector $V$.

    This function calls MatrixBase::Tmmult() to do the actual work.

    Definition at line 887 of file petsc_parallel_sparse_matrix.cc.
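A usage sketch of these two calls (A, B, C, Ct are hypothetical PETScWrappers::MPI::SparseMatrix objects of compatible sizes):

    PETScWrappers::MPI::SparseMatrix C, Ct;
    A.mmult(C, B);   // C  = A * B;  the sparsity pattern of C is built here
    A.Tmmult(Ct, B); // Ct = A^T * B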

    @@ -2059,8 +2059,8 @@
Return the l1-norm of the matrix, that is $|M|_1=\max_{\text{all columns }j}\sum_{\text{all rows }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -2088,8 +2088,8 @@
Return the linfty-norm of the matrix, that is $|M|_\infty=\max_{\text{all rows }i}\sum_{\text{all columns }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -2145,7 +2145,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -2176,7 +2176,7 @@
Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    @@ -2635,7 +2635,7 @@
Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 694 of file petsc_matrix_base.cc.

    @@ -2663,7 +2663,7 @@
Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 704 of file petsc_matrix_base.cc.

    @@ -2891,7 +2891,7 @@
Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    @@ -2926,7 +2926,7 @@
Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 2024-01-30 03:04:42.888801734 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 2024-01-30 03:04:42.888801734 +0000 @@ -1929,7 +1929,7 @@
Return the square of the $l_2$-norm.

    Definition at line 612 of file petsc_vector_base.cc.

    @@ -1985,7 +1985,7 @@
$l_1$-norm of the vector. The sum of the absolute values.

    Note
In complex-valued PETSc prior to 3.7.0 this norm is implemented as the sum of absolute values of real and imaginary parts of elements of a complex vector.

    Definition at line 672 of file petsc_vector_base.cc.

    @@ -2014,7 +2014,7 @@
$l_2$-norm of the vector. The square root of the sum of the squares of the elements.

    Definition at line 685 of file petsc_vector_base.cc.

    @@ -2042,7 +2042,7 @@
$l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    Definition at line 698 of file petsc_vector_base.cc.

    @@ -2070,7 +2070,7 @@
$l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

    Definition at line 740 of file petsc_vector_base.cc.
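A sketch collecting the norm calls documented above for a hypothetical vector v (for real-valued vectors, norm_sqr() equals the square of l2_norm()):

    const auto n2sq = v.norm_sqr();    // square of the l2-norm
    const auto n1   = v.l1_norm();     // sum of absolute values
    const auto n2   = v.l2_norm();     // sqrt of the sum of squares
    const auto np   = v.lp_norm(3.0);  // pth root of the sum of pth powers, here p=3
    const auto ninf = v.linfty_norm(); // largest absolute value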

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 2024-01-30 03:04:42.940802167 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 2024-01-30 03:04:42.940802167 +0000 @@ -1290,8 +1290,8 @@
Return the l1-norm of the matrix, that is $|M|_1=\max_{\text{all columns }j}\sum_{\text{all rows }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -1311,8 +1311,8 @@
Return the linfty-norm of the matrix, that is $|M|_\infty=\max_{\text{all rows }i}\sum_{\text{all columns }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -1352,7 +1352,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

@@ -1376,7 +1376,7 @@ const VectorBase & v

Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    @@ -1733,7 +1733,7 @@
Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 694 of file petsc_matrix_base.cc.

    @@ -1753,7 +1753,7 @@
Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 704 of file petsc_matrix_base.cc.

    @@ -1958,7 +1958,7 @@
Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    @@ -1993,7 +1993,7 @@
Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 2024-01-30 03:04:43.004802701 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 2024-01-30 03:04:43.004802701 +0000 @@ -1949,8 +1949,8 @@
Return the l1-norm of the matrix, that is $|M|_1=\max_{\text{all columns }j}\sum_{\text{all rows }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -1978,8 +1978,8 @@
Return the linfty-norm of the matrix, that is $|M|_\infty=\max_{\text{all rows }i}\sum_{\text{all columns }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -2035,7 +2035,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -2066,7 +2066,7 @@
Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    @@ -2405,7 +2405,7 @@
Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 694 of file petsc_matrix_base.cc.

    @@ -2433,7 +2433,7 @@
Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 704 of file petsc_matrix_base.cc.

    @@ -2661,7 +2661,7 @@
Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    @@ -2696,7 +2696,7 @@
Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 2024-01-30 03:04:43.032802934 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 2024-01-30 03:04:43.036802967 +0000 @@ -164,7 +164,7 @@

    Detailed Description

    template<typename VectorType = PETScWrappers::VectorBase, typename PMatrixType = PETScWrappers::MatrixBase, typename AMatrixType = PMatrixType>
    class PETScWrappers::NonlinearSolver< VectorType, PMatrixType, AMatrixType >

    Interface to PETSc SNES solver for nonlinear equations. The SNES solver is described in the PETSc manual.

This class solves the nonlinear system of algebraic equations $F(x) = 0$.

    The interface to PETSc is realized by means of std::function callbacks like in the TrilinosWrappers::NOXSolver and SUNDIALS::KINSOL classes.

    NonlinearSolver supports any vector and matrix type having constructors and methods:

    class VectorType : public Subscriptor
    @@ -181,7 +181,7 @@
    Mat & petsc_matrix();
    ...

    In particular, the supported types are the ones that can wrap PETSc's native vector and matrix classes, that are able to modify them in place, and that can return PETSc native types when requested.

To use the solvers the user needs to provide the implementation of $F$ via the NonlinearSolver::residual callback.

    The default linearization procedure of a solver instantiated with this class consists in using Jacobian-Free-Newton-Krylov; the action of tangent matrices inside a linear solver process are approximated via matrix-free finite-differencing of the nonlinear residual equations. For details, consult the PETSc manual.

Alternatively, users can also provide the implementation of the Jacobian. This can be accomplished in two ways:

Solve the nonlinear system of equations $F(x) = 0$.

    This function returns the number of iterations. The vector x must contain the initial guess. Upon returning, the x vector contains the solution.
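A minimal calling sketch for the Jacobian-free setting described above (the problem setup and the exact callback signature are assumptions; only residual and solve() are named on this page):

    PETScWrappers::NonlinearSolver<VectorType> solver(additional_data, mpi_communicator);
    solver.residual = [&](const VectorType &x, VectorType &res) {
      // evaluate F(x) into res
    };
    VectorType x = initial_guess;              // hypothetical initial guess
    const unsigned int n_it = solver.solve(x); // x holds the solution on return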

@@ -449,7 +449,7 @@ PMatrixType & P

Solve the nonlinear system of equations $F(x) = 0$.

    This function returns the number of iterations. The vector x must contain the initial guess. Upon returning, the x vector contains the solution.

    Here we also set the matrix to precondition the tangent system.

@@ -477,7 +477,7 @@ PMatrixType & P

Solve the nonlinear system of equations $F(x) = 0$.

    This function returns the number of iterations. The vector x must contain the initial guess. Upon returning, the x vector contains the solution.

    Here we also set the matrices to describe and precondition the tangent system.

    @@ -497,7 +497,7 @@
Callback for the computation of the nonlinear residual $F(x)$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.

    Definition at line 374 of file petsc_snes.h.

    @@ -517,7 +517,7 @@
Callback for the computation of the Jacobian $\dfrac{\partial F}{\partial x}$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.

    Definition at line 386 of file petsc_snes.h.

    @@ -559,7 +559,7 @@

    Callback to set up the Jacobian system.

This callback gives full control to users to set up the tangent operator $\dfrac{\partial F}{\partial x}$.

    Solvers must be provided via NonlinearSolver::solve_with_jacobian.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.
    @@ -602,7 +602,7 @@

    Callback for the computation of the energy function.

This is usually not needed, since by default SNES assumes that the objective function to be minimized is $\frac{1}{2} || F(x) ||^2 $.

    However, if the nonlinear equations are derived from energy arguments, it may be useful to use this callback to perform linesearch or to test for the reduction in a trust region step.

    The value of the energy function must be returned in energy_value.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.
/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 2024-01-30 03:04:43.100803501 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 2024-01-30 03:04:43.100803501 +0000 @@ -830,7 +830,7 @@ const MPI::Vector & V = MPI::Vector()

Perform the matrix-matrix multiplication $C = AB$, or, $C = A \text{diag}(V) B$ given a compatible vector $V$.

    This function calls MatrixBase::mmult() to do the actual work.

    Definition at line 258 of file petsc_sparse_matrix.cc.

@@ -857,7 +857,7 @@ const MPI::Vector & V = MPI::Vector()

Perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, $C = A^T \text{diag}(V) B$ given a compatible vector $V$.

    This function calls MatrixBase::Tmmult() to do the actual work.

    Definition at line 269 of file petsc_sparse_matrix.cc.

    @@ -1939,8 +1939,8 @@
Return the l1-norm of the matrix, that is $|M|_1=\max_{\text{all columns }j}\sum_{\text{all rows }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 417 of file petsc_matrix_base.cc.

    @@ -1968,8 +1968,8 @@
Return the linfty-norm of the matrix, that is $|M|_\infty=\max_{\text{all rows }i}\sum_{\text{all columns }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 430 of file petsc_matrix_base.cc.

    @@ -2025,7 +2025,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -2056,7 +2056,7 @@
Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    @@ -2515,7 +2515,7 @@
Test whether a matrix is symmetric. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 694 of file petsc_matrix_base.cc.

    @@ -2543,7 +2543,7 @@
Test whether a matrix is Hermitian, i.e. it is the complex conjugate of its transpose. Default tolerance is $1000\times32$-bit machine precision.

    Definition at line 704 of file petsc_matrix_base.cc.

    @@ -2771,7 +2771,7 @@
Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    @@ -2806,7 +2806,7 @@
Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 2024-01-30 03:04:43.128803734 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 2024-01-30 03:04:43.128803734 +0000 @@ -177,20 +177,20 @@
    template<typename VectorType = PETScWrappers::VectorBase, typename PMatrixType = PETScWrappers::MatrixBase, typename AMatrixType = PMatrixType>
    class PETScWrappers::TimeStepper< VectorType, PMatrixType, AMatrixType >

    Interface to the PETSc TS solver for Ordinary Differential Equations and Differential-Algebraic Equations. The TS solver is described in the PETSc manual.

    This class supports two kinds of formulations. The explicit formulation:

\[
   \begin{cases}
       \dot y = G(t,y)\, , \\
       y(t_0) = y_0\, , \\
   \end{cases}
\]

    and the implicit formulation:

\[
   \begin{cases}
       F(t,y,\dot y) = 0\, , \\
       y(t_0) = y_0\, . \\
   \end{cases}
\]

    The interface to PETSc is realized by means of std::function callbacks like in the SUNDIALS::IDA and SUNDIALS::ARKode classes.

    TimeStepper supports any vector and matrix type having constructors and methods:

    @@ -208,7 +208,7 @@
    Mat & petsc_matrix();
    ...

    In particular, the supported types are the ones that can wrap PETSc's native vector and matrix classes, that are able to modify them in place, and that can return PETSc native types when requested.

To use explicit solvers (like for example explicit Runge-Kutta methods), the user only needs to provide the implementation of $G$ via the TimeStepper::explicit_function. For implicit solvers, users also have the alternative of providing the $F$ function via TimeStepper::implicit_function. IMEX methods are also supported by providing both callbacks.

The default linearization procedure of an implicit solver instantiated with this class consists in using Jacobian-Free-Newton-Krylov; the action of tangent matrices inside a linear solver process is approximated via matrix-free finite-differencing of the nonlinear residual equations that are ODE-solver specific. For details, consult the PETSc manual.

Alternatively, users can provide the implementations of the Jacobians. This can be accomplished in two ways:

Callback for the computation of the implicit residual $F(t, y, \dot y)$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.

    Definition at line 453 of file petsc_ts.h.
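A hedged sketch of wiring this callback for the scalar model problem $F(t,y,\dot y)=\dot y - \lambda y$ (the four-argument signature is inferred from this page's description, not quoted from petsc_ts.h):

using VectorType = dealii::PETScWrappers::MPI::Vector;
dealii::PETScWrappers::TimeStepper<VectorType> stepper;

const double lambda = -1.0;
stepper.implicit_function =
  [lambda](const double t, const VectorType &y,
           const VectorType &y_dot, VectorType &res) {
    (void)t;              // F has no explicit t-dependence here
    res = y_dot;          // res = y'
    res.add(-lambda, y);  // res = y' - lambda y
  };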

    @@ -583,9 +583,9 @@
Callback for the computation of the implicit Jacobian $\dfrac{\partial F}{\partial y} + \alpha \dfrac{\partial F}{\partial \dot y}$.

All implicit solver implementations are recast to use the above linearization. The $\alpha$ parameter is time-step and solver-type specific.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.

    Definition at line 475 of file petsc_ts.h.

    @@ -605,7 +605,7 @@
Callback for the computation of the explicit residual $G(t, y)$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.

    Definition at line 486 of file petsc_ts.h.
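Continuing the sketch above, the explicit counterpart for $G(t,y)=-y$ (same caveat about the assumed signature):

stepper.explicit_function =
  [](const double t, const VectorType &y, VectorType &res) {
    (void)t;
    res = y;
    res *= -1.0; // G(t, y) = -y
  };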

    @@ -625,8 +625,8 @@
Callback for the computation of the explicit Jacobian $\dfrac{\partial G}{\partial y}$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.

    Definition at line 501 of file petsc_ts.h.

    @@ -668,9 +668,9 @@

Callback for the setup of the Jacobian system.

This callback gives full control to users to set up the linearized equations $\dfrac{\partial F}{\partial y} + \alpha \dfrac{\partial F}{\partial \dot y}$.

All implicit solver implementations are recast to use the above linearization. The $\alpha$ parameter is time-step and solver-type specific.

    Solvers must be provided via TimeStepper::solve_with_jacobian.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 2024-01-30 03:04:43.176804133 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 2024-01-30 03:04:43.176804133 +0000 @@ -1143,7 +1143,7 @@
Return the square of the $l_2$-norm.

    Definition at line 612 of file petsc_vector_base.cc.

    @@ -1183,7 +1183,7 @@
$l_1$-norm of the vector. The sum of the absolute values.

    Note
In complex-valued PETSc prior to 3.7.0 this norm is implemented as the sum of absolute values of real and imaginary parts of elements of a complex vector.

    Definition at line 672 of file petsc_vector_base.cc.

    @@ -1204,7 +1204,7 @@
$l_2$-norm of the vector. The square root of the sum of the squares of the elements.

    Definition at line 685 of file petsc_vector_base.cc.

    @@ -1224,7 +1224,7 @@
$l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    Definition at line 698 of file petsc_vector_base.cc.

    @@ -1244,7 +1244,7 @@
$l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

    Definition at line 740 of file petsc_vector_base.cc.
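The norm-related members documented above, gathered in one illustrative snippet (v is any initialized PETScWrappers vector):

const double sq = v.norm_sqr();     // square of the l2-norm
const double l1 = v.l1_norm();      // sum of absolute values
const double l2 = v.l2_norm();      // sqrt of the sum of squares
const double lp = v.lp_norm(3.0);   // p-th root of the sum of |v_i|^p
const double li = v.linfty_norm();  // largest absolute entry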

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPackagedOperation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPackagedOperation.html 2024-01-30 03:04:43.204804367 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPackagedOperation.html 2024-01-30 03:04:43.204804367 +0000 @@ -189,7 +189,7 @@
    // ..
    Vector<double> result = a + b - c + d;
or the computation of a residual $b-Ax$:

    // ..
    const auto op_a = linear_operator(A);
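The diff truncates this second snippet just after op_a is defined; a plausible continuation, assuming A is a SparseMatrix<double> and b, x are Vector<double> as in the first snippet:

#include <deal.II/lac/linear_operator.h>
#include <deal.II/lac/packaged_operation.h>

// b - op_a * x builds a PackagedOperation that is evaluated lazily,
// here upon assignment to a vector.
dealii::Vector<double> residual = b - op_a * x;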
/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 2024-01-30 03:04:43.272804933 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 2024-01-30 03:04:43.272804933 +0000 @@ -1039,8 +1039,8 @@ const typename Triangulation< dim, spacedim >::active_cell_iterator & cell

Insert a particle into the collection of particles. Return an iterator to the new position of the particle. This function involves a copy of the particle and its properties. Note that this function is of $O(N \log N)$ complexity for $N$ particles.

    Definition at line 579 of file particle_handler.cc.
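A hedged, self-contained sketch of a single insertion (2d unit square; all values illustrative):

#include <deal.II/fe/mapping_q1.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/particles/particle.h>
#include <deal.II/particles/particle_handler.h>

int main()
{
  dealii::Triangulation<2> tria;
  dealii::GridGenerator::hyper_cube(tria);
  const dealii::MappingQ1<2> mapping;
  dealii::Particles::ParticleHandler<2> handler(tria, mapping);

  // Real-space location, reference-cell location, particle id.
  const dealii::Particles::Particle<2> p(dealii::Point<2>(0.25, 0.25),
                                         dealii::Point<2>(0.25, 0.25),
                                         0);
  handler.insert_particle(p, tria.begin_active()); // O(N log N) per call
}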

    @@ -2696,7 +2696,7 @@
List from the active cells on the present MPI process to positions in either owned_particles or ghost_particles for fast $\mathcal O(1)$ access to the particles of a cell.

    Definition at line 945 of file particle_handler.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 2024-01-30 03:04:43.300805167 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 2024-01-30 03:04:43.300805167 +0000 @@ -632,7 +632,7 @@
This function makes sure that all internally stored memory blocks are sorted in the same order as one would loop over the handles_to_sort container. This ensures that memory access is contiguous with the actual memory layout. Because the ordering is given in the input argument, the complexity of this function is $O(N)$ where $N$ is the number of elements in the input argument.

    Definition at line 195 of file property_pool.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 2024-01-30 03:04:43.436806300 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 2024-01-30 03:04:43.436806300 +0000 @@ -1831,7 +1831,7 @@
Refine all cells times times, where times is the argument to this function. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
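An illustrative use (on a plain Triangulation for brevity; the cell counts follow the $(2^\text{dim})^\text{times}$ formula above):

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

dealii::Triangulation<2> tria;
dealii::GridGenerator::hyper_cube(tria);
tria.refine_global(3); // 1 cell -> (2^2)^3 = 64 active cells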
    @@ -6196,7 +6196,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e. for $dim>2$, no level argument must be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 2024-01-30 03:04:43.500806833 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 2024-01-30 03:04:43.504806866 +0000 @@ -798,7 +798,7 @@
Return the Euclidean distance of this point to the point p, i.e. the $l_2$ norm of the difference between the vectors representing the two points.

    Note
    This function can also be used in device code.
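A short worked example:

#include <deal.II/base/point.h>

const dealii::Point<3> p1(0., 0., 0.);
const dealii::Point<3> p2(1., 2., 2.);
const double d = p1.distance(p2); // sqrt(1 + 4 + 4) = 3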
    @@ -1462,7 +1462,7 @@
Return an unrolled index in the range $[0,\text{dim}^{\text{rank}}-1]$ for the element of the tensor indexed by the argument to the function.

    @@ -1490,7 +1490,7 @@
Opposite of component_to_unrolled_index: For an index in the range $[0, \text{dim}^{\text{rank}}-1]$, return which set of indices it would correspond to.
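A small illustration of the round trip between these two functions for a rank-2 tensor in 3d (the last index runs fastest):

#include <deal.II/base/table_indices.h>
#include <deal.II/base/tensor.h>

const dealii::TableIndices<2> ij(1, 2);
const unsigned int flat =
  dealii::Tensor<2, 3>::component_to_unrolled_index(ij); // 1 * 3 + 2 = 5
const dealii::TableIndices<2> back =
  dealii::Tensor<2, 3>::unrolled_to_component_indices(flat); // (1, 2)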

    @@ -2205,11 +2205,11 @@

    Entrywise multiplication of two tensor objects of general rank.

This multiplication is also called "Hadamard-product" (cf. https://en.wikipedia.org/wiki/Hadamard_product_(matrices)), and generates a new tensor of size <rank, dim>:

\[
   \text{result}_{i, j}
   = \text{left}_{i, j}\circ
     \text{right}_{i, j}
\]

    Template Parameters
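Assuming this entrywise product is the schur_product() function from tensor.h (the diff omits the signature), usage looks like:

#include <deal.II/base/tensor.h>

dealii::Tensor<2, 2> a, b;
a[0][0] = 1.; a[0][1] = 2.; a[1][0] = 3.; a[1][1] = 4.;
b[0][0] = 5.; b[0][1] = 6.; b[1][0] = 7.; b[1][1] = 8.;
const auto c = dealii::schur_product(a, b); // c[i][j] = a[i][j] * b[i][j]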
    @@ -2248,17 +2248,17 @@
The dot product (single contraction) for tensors. This function returns a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

\[
   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
   = \sum_{k}
     \text{left}_{i_1,\ldots,i_{r1}, k}
     \text{right}_{k, j_1,\ldots,j_{r2}}
\]

    Note
For the Tensor class, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, for which the corresponding operator*() performs a double contraction. The origin of the difference in how operator*() is implemented between Tensor and SymmetricTensor is that for the former, the product between two Tensor objects of same rank and dimension results in another Tensor object – that is, operator*() corresponds to the multiplicative group action within the group of tensors. On the other hand, there is no corresponding multiplicative group action with the set of symmetric tensors because, in general, the product of two symmetric tensors is a nonsymmetric tensor. As a consequence, for a mathematician, it is clear that operator*() for symmetric tensors must have a different meaning: namely the dot or scalar product that maps two symmetric tensors of rank 2 to a scalar. This corresponds to the double-dot (colon) operator whose meaning is then extended to the product of any two even-ranked symmetric tensors.
In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type.

Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).

    Definition at line 3035 of file tensor.h.

    @@ -2288,7 +2288,7 @@
Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

    Definition at line 3061 of file tensor.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 2024-01-30 03:04:43.548807233 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 2024-01-30 03:04:43.548807233 +0000 @@ -447,7 +447,7 @@
Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the polar coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{spacedim}$.

    This function is used in the computations required by the get_tangent_vector() function.

    Refer to the general documentation of this class for more information.

    @@ -687,7 +687,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -721,7 +721,7 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
\end{align*}

@@ -737,7 +737,7 @@

\begin{align*}
   \mathbf s(t) &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    \begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBDM.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBDM.html	2024-01-30 03:04:43.572807433 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBDM.html	2024-01-30 03:04:43.572807433 +0000
@@ -147,15 +147,15 @@

    Detailed Description

    template<int dim>
    class PolynomialsBDM< dim >

    This class implements the Hdiv-conforming, vector-valued Brezzi-Douglas-Marini ( BDM ) polynomials described in Brezzi and Fortin's Mixed and Hybrid Finite Element Methods (refer to pages 119 - 124).

The BDM polynomial space contains the entire $(P_{k})^{n}$ space (constructed with PolynomialSpace Legendre polynomials) as well as part of $(P_{k+1})^{n}$ (i.e. $(P_{k})^{n} \subset BDM_{k} \subset (P_{k+1})^{n}$). Furthermore, $BDM_{k}$ elements are designed so that $\nabla \cdot q \in P_{k-1} (K)$ and $q \cdot n |_{e_{i}} \in P_{k}(e_{i})$. More details of two and three dimensional $BDM_{k}$ elements are given below.

    In 2d:
$ BDM_{k} = \{\mathbf{q} | \mathbf{q} = p_{k} (x,y) +
      r \; \text{curl} (x^{k+1}y) + s \;
      \text{curl} (xy^{k+1}), p_{k} \in (P_{k})^{2} \}$.

Note: the curl of a scalar function is given by $\text{curl}(f(x,y)) =
   \begin{pmatrix} f_{y}(x,y) \\ -f_{x}(x,y) \end{pmatrix}$.

The basis used to construct the $BDM_{1}$ shape functions is

\begin{align*}
   \phi_0 = \begin{pmatrix} 1 \\ 0 \end{pmatrix},
   \phi_1 = \begin{pmatrix} -\sqrt{3}+2\sqrt{3}x \\ 0 \end{pmatrix},
   \phi_2 = \begin{pmatrix} -\sqrt{3}+2\sqrt{3}y \\ 0 \end{pmatrix},
   \phi_3 = \begin{pmatrix} 0 \\ 1 \end{pmatrix},
   \phi_4 = \begin{pmatrix} 0 \\ -\sqrt{3}+2\sqrt{3}x \end{pmatrix},
   \phi_5 = \begin{pmatrix} 0 \\ -\sqrt{3}+2\sqrt{3}y \end{pmatrix},
   \phi_6 = \begin{pmatrix} x^2 \\ -2xy \end{pmatrix},
   \phi_7 = \begin{pmatrix} 2xy \\ -y^2 \end{pmatrix}.
\end{align*}

The dimension of the $BDM_{k}$ space is $(k+1)(k+2)+2$, with $k+1$ unknowns per edge and $k(k-1)$ interior unknowns. For $k=1$ this gives $2 \cdot 3 + 2 = 8$, matching the eight shape functions $\phi_0,\ldots,\phi_7$ listed above.

    In 3d:
$ BDM_{k} =
     \{\mathbf{q} | \mathbf{q} = p_{k} (x,y,z)
     + \sum_{i=0}^{k} (
     r_{i} \; \text{curl}
     \begin{pmatrix} 0\\0\\xy^{i+1}z^{k-i} \end{pmatrix}
     + s_{i} \; \text{curl}
     \begin{pmatrix} yz^{i+1}x^{k-i}\\0\\0 \end{pmatrix}
     + t_{i} \; \text{curl}
     \begin{pmatrix}0\\zx^{i+1}y^{k-i}\\0\end{pmatrix})
     , p_{k} \in (P_{k})^{3} \}$.

Note: the 3d description of $BDM_{k}$ is not unique. See Mixed and Hybrid Finite Element Methods page 122 for an alternative definition.

The dimension of the $BDM_{k}$ space is $\dfrac{(k+1)(k+2)(k+3)}{2}+3(k+1)$, with $\dfrac{(k+1)(k+2)}{2}$ unknowns per face and $\dfrac{(k-1)k(k+1)}{2}$ interior unknowns.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 2024-01-30 03:04:43.596807632 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 2024-01-30 03:04:43.596807632 +0000 @@ -143,7 +143,7 @@
    template<int dim>
    class PolynomialsBernardiRaugel< dim >

    This class implements the Bernardi-Raugel polynomials similarly to the description in the Mathematics of Computation paper from 1985 by Christine Bernardi and Geneviève Raugel.

    The Bernardi-Raugel polynomials are originally defined as an enrichment of the $(P_1)^d$ elements on simplicial meshes for Stokes problems by the addition of bubble functions, yielding a locking-free finite element which is a subset of $(P_2)^d$ elements. This implementation is an enrichment of $(Q_1)^d$ elements which is a subset of $(Q_2)^d$ elements for quadrilateral and hexahedral meshes.

The $BR_1$ bubble functions are defined to have magnitude 1 at the center of face $e_i$ and direction $\mathbf{n}_i$ normal to face $e_i$, and magnitude 0 on all other vertices and faces. Ordering is consistent with the face numbering in GeometryInfo. The vector $\mathbf{n}_i$ points in the positive axis direction and not necessarily normal to the element for consistent orientation across edges.

    2d bubble functions (in order)
    $x=0$ edge: $\mathbf{p}_1 = \mathbf{n}_1 (1-x)(y)(1-y)$
$x=1$ edge: $\mathbf{p}_2 = \mathbf{n}_2 (x)(y)(1-y)$
    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html differs (HTML document, ASCII text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html	2024-01-30 03:04:43.628807899 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html	2024-01-30 03:04:43.628807899 +0000
    @@ -1232,7 +1232,7 @@
       
     
     
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.
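For concreteness, a hedged sketch of building a polynomial directly in this Lagrange product form (support points and index are arbitrary):

#include <deal.II/base/point.h>
#include <deal.II/base/polynomial.h>
#include <vector>

// Lagrange polynomial on {0, 0.5, 1} that is 1 at the support point
// with index 1 (x = 0.5) and 0 at the others; stored internally as
// (x - 0)(x - 1)/c together with the shifts x_i.
const std::vector<dealii::Point<1>> support = {dealii::Point<1>(0.0),
                                               dealii::Point<1>(0.5),
                                               dealii::Point<1>(1.0)};
const dealii::Polynomials::Polynomial<double> p(support, 1);
const double v = p.value(0.5); // == 1.0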

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 2024-01-30 03:04:43.664808199 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 2024-01-30 03:04:43.664808199 +0000 @@ -1184,7 +1184,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 2024-01-30 03:04:43.700808499 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 2024-01-30 03:04:43.700808499 +0000 @@ -1200,7 +1200,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 2024-01-30 03:04:43.736808800 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 2024-01-30 03:04:43.736808800 +0000 @@ -1271,7 +1271,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 2024-01-30 03:04:43.768809066 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 2024-01-30 03:04:43.772809099 +0000 @@ -1213,7 +1213,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html 2024-01-30 03:04:43.804809366 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html 2024-01-30 03:04:43.804809366 +0000 @@ -1174,7 +1174,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 2024-01-30 03:04:43.840809665 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 2024-01-30 03:04:43.840809665 +0000 @@ -219,7 +219,7 @@

    Detailed Description

    Lobatto polynomials of arbitrary degree on [0,1].

These polynomials are the integrated Legendre polynomials on [0,1]. The first two polynomials are the standard linear shape functions given by $l_0(x) = 1-x$ and $l_1(x) = x$. For $i\geq2$ we use the definition $l_i(x) = \frac{1}{\Vert L_{i-1}\Vert_2}\int_0^x L_{i-1}(t)\,dt$, where $L_i$ denotes the $i$-th Legendre polynomial on $[0,1]$. The Lobatto polynomials $l_0,\ldots,l_k$ form a complete basis of the polynomial space of degree $k$.

Calling the constructor with a given index k will generate the polynomial with index k. But only for $k\geq 1$ does the index equal the degree of the polynomial; for k==0, a polynomial of degree 1 is generated as well.

    These polynomials are used for the construction of the shape functions of Nédélec elements of arbitrary order.

    @@ -1204,7 +1204,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 2024-01-30 03:04:43.872809932 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 2024-01-30 03:04:43.872809932 +0000 @@ -1277,7 +1277,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html 2024-01-30 03:04:43.908810232 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html 2024-01-30 03:04:43.908810232 +0000 @@ -1204,7 +1204,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 2024-01-30 03:04:43.944810532 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 2024-01-30 03:04:43.944810532 +0000 @@ -1197,7 +1197,7 @@
The order of the highest derivative in which the Hermite basis can be used to impose continuity across element boundaries. It is related to the degree $p$ by $p = 2 \times\mathtt{regularity} +1$.

    Definition at line 132 of file polynomials_hermite.h.

    @@ -1323,7 +1323,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 314 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionChebyshev.html 2024-01-30 03:04:43.976810799 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionChebyshev.html 2024-01-30 03:04:43.976810799 +0000 @@ -506,7 +506,7 @@
Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    @@ -526,7 +526,7 @@
Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionIdentity.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionIdentity.html 2024-01-30 03:04:44.000810999 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionIdentity.html 2024-01-30 03:04:44.000810999 +0000 @@ -444,7 +444,7 @@
Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    Note
    This function should only be called if the preconditioner has been initialized.
    @@ -463,7 +463,7 @@
Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    Note
    This function should only be called if the preconditioner has been initialized.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionJacobi.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionJacobi.html 2024-01-30 03:04:44.028811232 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionJacobi.html 2024-01-30 03:04:44.028811232 +0000 @@ -458,7 +458,7 @@
Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    @@ -486,7 +486,7 @@
Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.
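These two functions recur on all the preconditioner classes in this report; an illustrative check after initialization (A is an assembled SparseMatrix<double>):

#include <deal.II/lac/precondition.h>
#include <deal.II/lac/sparse_matrix.h>

dealii::PreconditionJacobi<dealii::SparseMatrix<double>> prec;
prec.initialize(A);
const auto rows = prec.m(); // codomain (range) dimension of A
const auto cols = prec.n(); // domain dimension of A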

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionPSOR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionPSOR.html 2024-01-30 03:04:44.056811465 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionPSOR.html 2024-01-30 03:04:44.056811465 +0000 @@ -501,7 +501,7 @@
Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    @@ -529,7 +529,7 @@
Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 2024-01-30 03:04:44.084811698 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 2024-01-30 03:04:44.084811698 +0000 @@ -332,7 +332,7 @@
Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    @@ -352,7 +352,7 @@
Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRichardson.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRichardson.html 2024-01-30 03:04:44.108811898 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRichardson.html 2024-01-30 03:04:44.108811898 +0000 @@ -467,7 +467,7 @@
Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    Note
    This function should only be called if the preconditioner has been initialized.
    @@ -486,7 +486,7 @@
Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    Note
    This function should only be called if the preconditioner has been initialized.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSOR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSOR.html 2024-01-30 03:04:44.136812132 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSOR.html 2024-01-30 03:04:44.136812132 +0000 @@ -471,7 +471,7 @@
Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    @@ -499,7 +499,7 @@
Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSSOR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSSOR.html 2024-01-30 03:04:44.164812365 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSSOR.html 2024-01-30 03:04:44.164812365 +0000 @@ -456,7 +456,7 @@
Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    @@ -484,7 +484,7 @@
Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSelector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSelector.html 2024-01-30 03:04:44.192812598 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionSelector.html 2024-01-30 03:04:44.192812598 +0000 @@ -409,7 +409,7 @@
Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    Definition at line 232 of file precondition_selector.h.

    @@ -439,7 +439,7 @@
Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    Definition at line 241 of file precondition_selector.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 2024-01-30 03:04:44.220812832 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 2024-01-30 03:04:44.220812832 +0000 @@ -197,7 +197,7 @@

    Detailed Description

    template<int dim>
class QGaussChebyshev< dim >

Gauss-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-1$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. For details see: M. Abramowitz & I.A. Stegun: Handbook of Mathematical Functions, par. 25.4.38

    Definition at line 493 of file quadrature_lib.h.
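A quick numerical check of the rescaled rule: for $f(x)=x$ (a monomial of order $1 \le 2n-1$, so integrated exactly) one has $\int_0^1 x/\sqrt{x(1-x)}\,dx = \pi/2$:

#include <deal.II/base/quadrature_lib.h>

const dealii::QGaussChebyshev<1> quad(4);
double integral = 0.;
for (unsigned int q = 0; q < quad.size(); ++q)
  integral += quad.point(q)[0] * quad.weight(q); // f(x) = x at node q
// integral now equals pi/2 up to roundoff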

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 2024-01-30 03:04:44.248813065 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 2024-01-30 03:04:44.248813065 +0000 @@ -198,7 +198,7 @@ class QGaussLobatto< dim >

    The Gauss-Lobatto family of quadrature rules for numerical integration.

    This modification of the Gauss quadrature uses the two interval end points as well. Being exact for polynomials of degree 2n-3, this formula is suboptimal by two degrees.

The quadrature points are interval end points plus the roots of the derivative of the Legendre polynomial $P_{n-1}$ of degree $n-1$. The quadrature weights are $2/(n(n-1)[P_{n-1}(x_i)]^2)$.

Note
This implementation has not been optimized concerning numerical stability and efficiency. It can be easily adapted to the general case of Gauss-Lobatto-Jacobi-Bouzitat quadrature with arbitrary parameters $\alpha$, $\beta$, of which the Gauss-Lobatto-Legendre quadrature ( $\alpha = \beta = 0$) is a special case.
    See also
    http://en.wikipedia.org/wiki/Handbook_of_Mathematical_Functions
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 2024-01-30 03:04:44.280813331 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 2024-01-30 03:04:44.280813331 +0000 @@ -197,7 +197,7 @@

    Detailed Description

    template<int dim>
class QGaussLobattoChebyshev< dim >

Gauss-Lobatto-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$, with the additional constraint that two of the quadrature points are located at the endpoints of the quadrature interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-3$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. For details see: M. Abramowitz & I.A. Stegun: Handbook of Mathematical Functions, par. 25.4.40

    Definition at line 561 of file quadrature_lib.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 2024-01-30 03:04:44.308813565 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 2024-01-30 03:04:44.308813565 +0000 @@ -203,8 +203,8 @@

    Detailed Description

    template<int dim>
class QGaussLog< dim >

A class for Gauss quadrature with logarithmic weighting function. This formula is used to integrate $\ln|x|\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities. The collection of quadrature points and weights has been obtained using Numerical Recipes.

Notice that only the function $f(x)$ should be provided, i.e., $\int_0^1 f(x) \ln|x| dx = \sum_{i=0}^N w_i f(q_i)$. Setting the revert flag to true at construction time switches the weight from $\ln|x|$ to $\ln|1-x|$.

    The weights and functions have been tabulated up to order 12.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 2024-01-30 03:04:44.340813831 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 2024-01-30 03:04:44.340813831 +0000 @@ -198,15 +198,15 @@

    Detailed Description

    template<int dim>
class QGaussLogR< dim >

A class for Gauss quadrature with arbitrary logarithmic weighting function. This formula is used to integrate $\ln(|x-x_0|/\alpha)\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities, and $x_0$ and $\alpha$ are given at construction time, and are the location of the singularity $x_0$ and an arbitrary scaling factor in the singularity.

You have to make sure that the point $x_0$ is not one of the Gauss quadrature points of order $N$, otherwise an exception is thrown, since the quadrature weights cannot be computed correctly.

    This quadrature formula is rather expensive, since it uses internally two Gauss quadrature formulas of order n to integrate the nonsingular part of the factor, and two GaussLog quadrature formulas to integrate on the separate segments $[0,x_0]$ and $[x_0,1]$. If the singularity is one of the extremes and the factor alpha is 1, then this quadrature is the same as QGaussLog.

    The last argument from the constructor allows you to use this quadrature rule in one of two possible ways:

    \[ \int_0^1 g(x) dx = \int_0^1 f(x)
 \ln\left(\frac{|x-x_0|}{\alpha}\right) dx = \sum_{i=0}^N w_i g(q_i) =
 \sum_{i=0}^N \bar{w}_i f(q_i) \]

Which one of the two sets of weights is provided can be selected by the factor_out_singular_weight parameter. If it is false (the default), then the $\bar{w}_i$ weights are computed, and you should provide only the smooth function $f(x)$, since the singularity is included inside the quadrature. If the parameter is set to true, then the singularity is factored out of the quadrature formula, and you should provide a function $g(x)$, which should at least be similar to $\ln(|x-x_0|/\alpha)$.

    Notice that this quadrature rule is worthless if you try to use it for regular functions once you factored out the singularity.

    The weights and functions have been tabulated up to order 12.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 2024-01-30 03:04:44.372814098 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 2024-01-30 03:04:44.372814098 +0000 @@ -203,9 +203,9 @@

    Detailed Description

    template<int dim>
class QGaussOneOverR< dim >

A class for Gauss quadrature with $1/R$ weighting function. This formula can be used to integrate $1/R \ f(x)$ on the reference element $[0,1]^2$, where $f$ is a smooth function without singularities, and $R$ is the distance from the point $x$ to the vertex $\xi$, given at construction time by specifying its index. Notice that this distance is evaluated in the reference element.

This quadrature formula is obtained from two QGauss quadrature formulas, upon transforming them into a polar coordinate system centered at the singularity, and then again into another reference element. This allows the singularity to be cancelled by the part of the Jacobian of the transformation that contains $R$. In practice the reference element is transformed into a triangle by collapsing one of the sides adjacent to the singularity. The Jacobian of this transformation contains $R$, which is removed before scaling the original quadrature, and this process is repeated for the next half element.

Upon construction it is possible to specify whether we want the singularity removed or not. In other words, this quadrature can be used to integrate $g(x) = 1/R\ f(x)$, or simply $f(x)$, with the $1/R$ factor already included in the quadrature weights.
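A minimal sketch of constructing this rule, assuming a constructor that takes the number of points per direction, the index of the singular vertex, and the factor_out_singular_weight flag (an assumption to be verified against quadrature_lib.h):

#include <deal.II/base/quadrature_lib.h>

int main()
{
  using namespace dealii;

  // Hypothetical usage: singularity at vertex 0 of [0,1]^2, with the
  // 1/R factor kept inside the quadrature weights.
  QGaussOneOverR<2> quad(5, /*vertex_index=*/0,
                         /*factor_out_singular_weight=*/false);
  (void)quad; // use quad.point(q), quad.weight(q) as with any Quadrature<2>
}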

    Definition at line 291 of file quadrature_lib.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 2024-01-30 03:04:44.404814365 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 2024-01-30 03:04:44.404814365 +0000 @@ -205,7 +205,7 @@

    Detailed Description

    template<int dim>
class QGaussRadauChebyshev< dim >

Gauss-Radau-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with the weight $w(x) = 1/\sqrt{1-x^2}$, with the additional constraint that a quadrature point lies at one of the two extrema of the interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-2$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$, so the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight $w(x) = 1/\sqrt{x(1-x)}$. By default the quadrature is constructed with the left endpoint as quadrature node, but the quadrature node can be imposed at the right endpoint through the variable ep, which can assume the values left or right.
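A minimal sketch of selecting the endpoint, assuming ep is exposed as an enum with values left and right as the description suggests (an assumption, not confirmed by this page):

#include <deal.II/base/quadrature_lib.h>

int main()
{
  using namespace dealii;

  // Default: the quadrature node sits at the left endpoint of [0,1].
  QGaussRadauChebyshev<1> q_left(5);

  // Impose the quadrature node at the right endpoint instead.
  QGaussRadauChebyshev<1> q_right(5, QGaussRadauChebyshev<1>::right);

  (void)q_left;
  (void)q_right;
}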

    Definition at line 516 of file quadrature_lib.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 2024-01-30 03:04:44.428814564 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 2024-01-30 03:04:44.428814564 +0000 @@ -333,28 +333,28 @@

    Remove first column and update QR factorization.

Starting from the given QR decomposition $QR = A = [a_1\,\dots\, a_n], \quad a_i \in {\mathbb R}^m$ we aim at computing the factorization $\tilde Q \tilde R = \tilde A = [a_2\,\dots\, a_n], \quad a_i \in {\mathbb R}^m$.

The standard approach is to partition $R$ as

\[
R =
\begin{bmatrix}
r_{11} & w^T \\
0      & R_{33}
\end{bmatrix}
\]

It then follows that

\[
Q^T \tilde A =
\begin{bmatrix}
0 & w^T \\
0 & R_{33}
\end{bmatrix}
\]

is upper Hessenberg, where unwanted sub-diagonal elements can be zeroed by a sequence of Givens rotations.

Note that $\tilde R^T \tilde R = \tilde A^T \tilde A$, where the RHS is included in $A^T A = R^T R$. Therefore $\tilde R$ can be obtained by Cholesky decomposition.
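For reference (standard linear algebra, added here for context and not part of the class documentation), a single Givens rotation acting on rows $i$ and $j$ has the form

\[
G(i,j) =
\begin{bmatrix}
c & s \\
-s & c
\end{bmatrix},
\qquad
c = \frac{r_{ii}}{\sqrt{r_{ii}^2 + r_{ji}^2}}, \quad
s = \frac{r_{ji}}{\sqrt{r_{ii}^2 + r_{ji}^2}},
\]

and, applied from the left, zeroes the $(j,i)$ entry; each sub-diagonal entry of the upper Hessenberg matrix above can be eliminated by one such rotation.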

    Implements BaseQR< VectorType >.

    @@ -388,7 +388,7 @@
Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -422,7 +422,7 @@
Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.

    @@ -456,7 +456,7 @@
Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -490,7 +490,7 @@
Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.

    @@ -610,7 +610,7 @@
Solve $Rx=y$. Vectors x and y should be consistent with the current size of the subspace. If transpose is true, $R^Tx=y$ is solved instead.

    @@ -671,7 +671,7 @@
Compute $y=Hx$ where $H$ is the matrix formed by the column vectors stored by this object.
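A sketch of how these members might fit together, assuming the append_column/remove_column interface suggested by the member list above (an assumption to be checked against qr.h):

#include <deal.II/lac/qr.h>
#include <deal.II/lac/vector.h>

int main()
{
  using namespace dealii;
  using VectorType = Vector<double>;

  QR<VectorType> qr;

  // Build the factorization column by column: A = [a_1 a_2], a_i in R^3.
  VectorType a1(3), a2(3);
  a1[0] = 1; a1[1] = 1; a1[2] = 0;
  a2[0] = 0; a2[1] = 1; a2[2] = 1;
  qr.append_column(a1);
  qr.append_column(a2);

  // y = Q x, where x is sized like R (two columns in the subspace).
  Vector<double> x(qr.size());
  VectorType     y(3);
  x[0] = 1.0;
  x[1] = 2.0;
  qr.multiply_with_Q(y, x);

  // Drop the first column and update the factorization with Givens
  // rotations, as described above.
  qr.remove_column();
}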

/usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 2024-01-30 03:04:44.460814831 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 2024-01-30 03:04:44.460814831 +0000 @@ -223,7 +223,7 @@

    Since the library assumes $[0,1]$ as reference interval, we will map these values on the proper reference interval in the implementation.

This variable change can be used to integrate singular integrals. One example is $f(x)/|x-x_0|$ on the reference interval $[0,1]$, where $x_0$, the location of the singularity, is given at construction time, and $f(x)$ is a smooth, non-singular function.

Singular quadrature formulas are rather expensive; nevertheless, Telles' quadrature formulas are much easier to compute than other singular integration techniques such as Lachat-Watson.

We have implemented the case $dim = 1$. For the case $dim > 1$ we compute the quadrature formula as a tensor product of one-dimensional Telles' quadrature formulas, considering the different components of the singularity.

The weights and functions for the Gauss-Legendre formula have been tabulated up to order 12.
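A minimal sketch of constructing such a rule, assuming a constructor that takes the number of points and the singularity location (an assumption to be checked against quadrature_lib.h):

#include <deal.II/base/quadrature_lib.h>

int main()
{
  using namespace dealii;

  // Hypothetical usage: a Telles rule on [0,1] with the singularity at
  // x0 = 0.5, suitable for integrands like f(x)/|x - 0.5|.
  const Point<1> singularity(0.5);
  QTelles<1>     quad(10, singularity);

  // quad.point(q) and quad.weight(q) can now be used like any other
  // Quadrature<1> object.
  (void)quad;
}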

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 2024-01-30 03:04:44.492815098 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 2024-01-30 03:04:44.492815098 +0000 @@ -200,7 +200,7 @@

    Detailed Description

    template<int dim>
    class QWitherdenVincentSimplex< dim >

    Witherden-Vincent rules for simplex entities.

Like QGauss, users should specify a number n_points_1d as an indication of what polynomial degree is to be integrated exactly (e.g., for $n$ points, the rule can integrate polynomials of degree $2 n - 1$ exactly). Additionally, since these rules were derived for simplices, there are also even-ordered rules (i.e., rules that integrate polynomials of degree $2 n$) available which do not have analogous 1d rules.

    The given value for n_points_1d = 1, 2, 3, 4, 5, 6, 7 (where the last two are only implemented in 2d) results in the following number of quadrature points in 2d and 3d:

Compute the value of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    Definition at line 2371 of file reference_cell.h.

    @@ -503,7 +503,7 @@
Compute the gradient of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    Definition at line 2460 of file reference_cell.h.

    @@ -547,7 +547,7 @@
Return a default linear mapping matching the current reference cell. If this reference cell is a hypercube, then the returned mapping is a MappingQ1; otherwise, it is an object of type MappingFE initialized with FE_SimplexP (if the reference cell is a triangle or tetrahedron), with FE_PyramidP (if the reference cell is a pyramid), or with FE_WedgeP (if the reference cell is a wedge). In other words, the term "linear" in the name of the function has to be understood as $d$-linear (i.e., bilinear or trilinear) for some of the coordinate directions.

    Definition at line 149 of file reference_cell.cc.

    @@ -1319,7 +1319,7 @@
Return the $d$-dimensional volume of the reference cell that corresponds to the current object, where $d$ is the dimension of the space it lives in. For example, since the quadrilateral reference cell is $[0,1]^2$, its volume is one, whereas the volume of the reference triangle is 0.5 because it occupies the area $\{0 \le x,y \le 1, x+y\le 1\}$.

    For ReferenceCells::Vertex, the reference cell is a zero-dimensional point in a zero-dimensional space. As a consequence, one cannot meaningfully define a volume for it. The function returns one for this case, because this makes it possible to define useful quadrature rules based on the center of a reference cell and its volume.

    Definition at line 2494 of file reference_cell.h.
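As a small sketch of the volumes quoted above (assuming the ReferenceCells named objects and the volume() member described here):

#include <deal.II/grid/reference_cell.h>
#include <iostream>

int main()
{
  using namespace dealii;

  // [0,1]^2 has volume 1; the reference triangle occupies
  // {0 <= x,y <= 1, x+y <= 1} and has volume 0.5.
  std::cout << ReferenceCells::Quadrilateral.volume() << ' '
            << ReferenceCells::Triangle.volume() << std::endl;
}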

    @@ -1438,8 +1438,8 @@
Return the $i$-th unit tangential vector of a face of the reference cell. The vectors are arranged such that the cross product between the two vectors returns the unit normal vector.

Precondition
$i$ must be between zero and dim-1.

    Definition at line 2667 of file reference_cell.h.

@@ -1791,7 +1791,7 @@ const bool legacy_format

Given a set of node indices of the form $(i)$ or $(i,j)$ or $(i,j,k)$ (depending on whether the reference cell is in 1d, 2d, or 3d), return the index the VTK format uses for this node for cells that are subdivided as many times in each of the coordinate directions as described by the second argument. For a uniformly subdivided cell, the second argument is an array whose elements will all be equal.

    The last argument, legacy_format, indicates whether to use the old, VTK legacy format (when true) or the new, VTU format (when false).

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html 2024-01-30 03:04:44.608816065 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html 2024-01-30 03:04:44.608816065 +0000 @@ -242,7 +242,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types
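A sketch of that calling sequence from the user's side, assuming the usual deal.II SLEPc wrapper usage with PETSc matrices and vectors (the SolverControl-based constructor is an assumption not confirmed by this page):

#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/solver_control.h>

#include <vector>

void eigensolve(const dealii::PETScWrappers::SparseMatrix &A)
{
  using namespace dealii;

  const unsigned int n_eigenpairs = 3;
  std::vector<double>                     eigenvalues(n_eigenpairs);
  // Each eigenvector needs at least one element as a sizing template.
  std::vector<PETScWrappers::MPI::Vector> eigenvectors(
    n_eigenpairs,
    PETScWrappers::MPI::Vector(MPI_COMM_WORLD, A.m(), A.m()));

  SolverControl                solver_control(1000, 1e-9);
  SLEPcWrappers::SolverArnoldi eigensolver(solver_control);

  // Composite solve: set up matrices, solve A x = lambda x, gather results.
  eigensolver.solve(A, eigenvalues, eigenvectors, n_eigenpairs);
}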

    @@ -683,7 +683,7 @@
Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 77 of file slepc_solver.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 2024-01-30 03:04:44.636816297 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 2024-01-30 03:04:44.636816297 +0000 @@ -254,7 +254,7 @@ const unsigned int n_eigenpairs = 1

Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -625,7 +625,7 @@
Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 77 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html 2024-01-30 03:04:44.664816530 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html 2024-01-30 03:04:44.664816530 +0000 @@ -242,7 +242,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -683,7 +683,7 @@
Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 77 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 2024-01-30 03:04:44.692816764 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 2024-01-30 03:04:44.692816764 +0000 @@ -242,7 +242,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -683,7 +683,7 @@
Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 77 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 2024-01-30 03:04:44.720816997 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 2024-01-30 03:04:44.720816997 +0000 @@ -242,7 +242,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -683,7 +683,7 @@
Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 77 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html 2024-01-30 03:04:44.748817231 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html 2024-01-30 03:04:44.748817231 +0000 @@ -242,7 +242,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -683,7 +683,7 @@
Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 77 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 2024-01-30 03:04:44.776817464 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 2024-01-30 03:04:44.776817464 +0000 @@ -242,7 +242,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -683,7 +683,7 @@
Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 77 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html 2024-01-30 03:04:44.804817698 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html 2024-01-30 03:04:44.804817698 +0000 @@ -242,7 +242,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -683,7 +683,7 @@
Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 77 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 2024-01-30 03:04:44.848818064 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 2024-01-30 03:04:44.848818064 +0000 @@ -198,85 +198,85 @@

The class ARKode is a wrapper to the SUNDIALS variable-step, embedded, additive Runge-Kutta solver, a general-purpose solver for systems of ordinary differential equations characterized by the presence of both fast and slow dynamics.

Fast dynamics are treated implicitly, and slow dynamics are treated explicitly, using nested families of implicit and explicit Runge-Kutta solvers.

Citing directly from the ARKode documentation:

ARKode solves ODE initial value problems (IVPs) in $R^N$. These problems should be posed in explicit form as

\[
   M\dot y = f_E(t, y) + f_I (t, y), \qquad y(t_0) = y_0.
\]

Here, $t$ is the independent variable (e.g. time), and the dependent variables are given by $y \in R^N$, and we use notation $\dot y$ to denote $dy/dt$. $M$ is a user-supplied nonsingular operator from $R^N \to R^N$. This operator may depend on $t$ but not on $y$.

For standard systems of ordinary differential equations and for problems arising from the spatial semi-discretization of partial differential equations using finite difference or finite volume methods, $M$ is typically the identity matrix, $I$. For PDEs using a finite-element spatial semi-discretization $M$ is typically a well-conditioned mass matrix.

    The two right-hand side functions may be described as:

ARKode may be used to solve stiff, nonstiff and multi-rate problems. Roughly speaking, stiffness is characterized by the presence of at least one rapidly damped mode, whose time constant is small compared to the time scale of the solution itself. In the implicit/explicit (ImEx) splitting above, these stiff components should be included in the right-hand side function $f_I (t, y)$.

For multi-rate problems, a user should provide both of the functions $f_E$ and $f_I$ that define the IVP system.

For nonstiff problems, only $f_E$ should be provided, and $f_I$ is assumed to be zero, i.e. the system reduces to the non-split IVP:

\[
   M\dot y = f_E(t, y), \qquad y(t_0) = y_0.
\]

In this scenario, the ARK methods reduce to classical explicit Runge-Kutta methods (ERK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5, 6, 8\}$, with embeddings of orders $p = \{1, 2, 3, 4, 5, 7\}$. These default to the Heun-Euler-2-1-2, Bogacki-Shampine-4-2-3, Zonneveld-5-3-4, Cash-Karp-6-4-5, Verner-8-5-6 and Fehlberg-13-7-8 methods, respectively.

Finally, for stiff (linear or nonlinear) problems the user may provide only $f_I$, implying that $f_E = 0$, so that the system reduces to the non-split IVP

\[
   M\dot y = f_I(t, y), \qquad y(t_0) = y_0.
\]

Similarly to ERK methods, in this scenario the ARK methods reduce to classical diagonally-implicit Runge-Kutta methods (DIRK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5\}$, with embeddings of orders $p = \{1, 2, 3, 4\}$. These default to the SDIRK-2-1-2, ARK-4-2-3 (implicit), SDIRK-5-3-4 and ARK-8-4-5 (implicit) methods, respectively.

    For both DIRK and ARK methods, an implicit system of the form

\[
  G(z_i) \dealcoloneq M z_i - h_n A^I_{i,i} f_I (t^I_{n,i}, z_i) - a_i = 0
\]

must be solved for each stage $z_i, i = 1, \ldots, s$, where we have the data

\[
  a_i \dealcoloneq
  M y_{n-1} + h_n \sum_{j=1}^{i-1} [ A^E_{i,j} f_E(t^E_{n,j}, z_j)
  + A^I_{i,j} f_I (t^I_{n,j}, z_j)]
\]

    for the ARK methods, or

\[
  a_i \dealcoloneq
  M y_{n-1} + h_n \sum_{j=1}^{i-1} A^I_{i,j} f_I (t^I_{n,j}, z_j)
\]

for the DIRK methods. Here $A^I_{i,j}$ and $A^E_{i,j}$ are the Butcher tableaus for the chosen solver.

If $f_I(t,y)$ depends nonlinearly on $y$ then the systems above correspond to a nonlinear system of equations; if $f_I (t, y)$ depends linearly on $y$ then this is a linear system of equations. By specifying the flag implicit_function_is_linear, ARKode takes some shortcuts that allow a faster solution process.

    For systems of either type, ARKode allows a choice of solution strategy. The default solver choice is a variant of Newton's method,

\[
  z_i^{m+1} = z_i^m + \delta^{m+1},
\]

where $m$ is the Newton step index, and the Newton update $\delta^{m+1}$ requires the solution of the linear Newton system

\[
  N(z_i^m) \delta^{m+1} = -G(z_i^m),
\]

    where

\[
  N \dealcoloneq M - \gamma J, \quad J
  \dealcoloneq \frac{\partial f_I}{\partial y},
  \qquad \gamma \dealcoloneq h_n A^I_{i,i}.
\]

As an alternative to Newton's method, ARKode may solve for each stage $z_i, i = 1, \ldots, s$ using an Anderson-accelerated fixed point iteration

\[
  z_i^{m+1} = g(z_i^{m}), \qquad m = 0, 1, \ldots.
\]

Unlike with Newton's method, this option does not require the solution of a linear system at each iteration, instead solving a low-dimensional least-squares problem to construct the nonlinear update.

Finally, if the user specifies implicit_function_is_linear, i.e., $f_I(t, y)$ depends linearly on $y$, and if the Newton-based nonlinear solver is chosen, then the system will be solved using only a single Newton iteration. Notice that in order for the Newton solver to be used, jacobian_times_vector() should be supplied. If it is not supplied then only the fixed-point iteration will be supported, and the implicit_function_is_linear setting is ignored.

The optimal solver (Newton vs fixed-point) is highly problem-dependent. Since fixed-point solvers do not require the solution of any linear systems, each iteration may be significantly less costly than its Newton counterpart. However, this can come at the cost of slower convergence (or even divergence) in comparison with Newton-like methods. These fixed-point solvers do allow for user specification of the Anderson-accelerated subspace size, $m_k$. While the required amount of solver memory grows proportionately to $m_k N$, larger values of $m_k$ may result in faster convergence.

This improvement may be significant even for "small" values, e.g. $1 \leq m_k \leq 5$, and convergence may not improve (or may even deteriorate) for larger values of $m_k$. While ARKode uses a Newton-based iteration as its default solver due to its increased robustness on very stiff problems, it is highly recommended that users also consider the fixed-point solver when attempting a new problem.

For either the Newton or fixed-point solvers, it is well known that both the efficiency and robustness of the algorithm intimately depend on the choice of a good initial guess. In ARKode, the initial guess for either nonlinear solution method is a predicted value $z_i(0)$ that is computed explicitly from the previously-computed data (e.g. $y_{n-2}, y_{n-1}$, and $z_j$ where $j < i$). Additional information on the specific predictor algorithms implemented in ARKode is provided in the ARKode documentation.

    The user has to provide the implementation of at least one (or both) of the following std::functions:

    To provide a simple example, consider the harmonic oscillator problem:

\[
  \begin{split}
    u'' & = -k^2 u \\
    u (0) & = 0 \\
    u'(0) & = k
  \end{split}
\]

    We write it in terms of a first order ode:

\[
  \begin{matrix}
    y_0' & =  y_1 \\
    y_1' & = - k^2 y_0
  \end{matrix}
\]

That is $y' = A y$ where

\[
  A \dealcoloneq
  \begin{pmatrix}
  0 & 1 \\
  -k^2 & 0
  \end{pmatrix}
\]

and $y(0)=(0, k)^T$.

The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t) = k \cos(k t)$, $y_1'(t) = -k^2 \sin(k t)$.

    A minimal implementation, using only explicit RK methods, is given by the following code snippet:

    using VectorType = Vector<double>;
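(The remainder of the snippet is truncated in this diff. A sketch of what a complete version might look like, following the harmonic oscillator above and using the explicit_function member documented below; the AdditionalData fields and solve_ode() call are assumptions about the wrapper's interface:)

#include <deal.II/lac/vector.h>
#include <deal.II/sundials/arkode.h>

int main()
{
  using namespace dealii;
  using VectorType = Vector<double>;

  const double k = 1.0;

  SUNDIALS::ARKode<VectorType>::AdditionalData data;
  data.final_time = 2.0; // assumed field name
  SUNDIALS::ARKode<VectorType> ode(data);

  // Only the explicit part f_E is provided, so ARKode reduces to an
  // explicit RK method for y' = A y.
  ode.explicit_function =
    [&](const double /*t*/, const VectorType &y, VectorType &ydot) {
      ydot[0] = y[1];
      ydot[1] = -k * k * y[0];
    };

  // Initial condition y(0) = (0, k)^T; exact solution y_0(t) = sin(k t).
  VectorType y(2);
  y[0] = 0.0;
  y[1] = k;
  ode.solve_ode(y); // assumed driver call
}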
    @@ -720,8 +720,8 @@
A function object that users may supply and that is intended to compute the explicit part of the IVP right hand side. Sets $explicit_f = f_E(t, y)$.

    At least one of explicit_function() or implicit_function() must be provided. According to which one is provided, explicit, implicit, or mixed RK methods are used.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, ARKode can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -742,8 +742,8 @@
A function object that users may supply and that is intended to compute the implicit part of the IVP right hand side. Sets $implicit_f = f_I(t, y)$.

    At least one of explicit_function() or implicit_function() must be provided. According to which one is provided, explicit, implicit, or mixed RK methods are used.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, ARKode can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -765,7 +765,7 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html 2024-01-30 03:04:44.876818297 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html 2024-01-30 03:04:44.876818297 +0000 @@ -193,69 +193,69 @@

    Citing from the SUNDIALS documentation:

    Consider a system of Differential-Algebraic Equations written in the general form

\[
  \begin{cases}
      F(t,y,\dot y) = 0\, , \\
      y(t_0) = y_0\, , \\
      \dot y (t_0) = \dot y_0\, .
  \end{cases}
\]

where $y,\dot y$ are vectors in $\mathbb{R}^n$, $t$ is often the time (but can also be a parametric quantity), and $F:\mathbb{R}\times\mathbb{R}^n\times \mathbb{R}^n\rightarrow\mathbb{R}^n$. Such a problem is solved using Newton iteration augmented with a line search global strategy. The integration method used in IDA is the variable-order, variable-coefficient BDF (Backward Differentiation Formula) in fixed-leading-coefficient form. The method order ranges from 1 to 5, with the BDF of order $q$ given by the multistep formula

\[
  \sum_{i=0}^q \alpha_{n,i}\,y_{n-i}=h_n\,\dot y_n\, ,
\]

where $y_n$ and $\dot y_n$ are the computed approximations of $y(t_n)$ and $\dot y(t_n)$, respectively, and the step size is $h_n=t_n-t_{n-1}$. The coefficients $\alpha_{n,i}$ are uniquely determined by the order $q$, and the history of the step sizes. The application of the BDF method to the DAE system results in a nonlinear algebraic system to be solved at each time step:

\[
  G(y_n)\equiv F\left(t_n,y_n,\dfrac{1}{h_n}\sum_{i=0}^q
  \alpha_{n,i}\,y_{n-i}\right)=0\, .
\]

    The Newton method leads to a linear system of the form

\[
  J[y_{n(m+1)}-y_{n(m)}]=-G(y_{n(m)})\, ,
\]

where $y_{n(m)}$ is the $m$-th approximation to $y_n$, and $J$ is the approximation of the system Jacobian

\[
  J=\dfrac{\partial G}{\partial y} = \dfrac{\partial F}{\partial y} +
  \alpha \dfrac{\partial F}{\partial \dot y}\, ,
\]

and $\alpha = \alpha_{n,0}/h_n$. It is worth mentioning that the scalar $\alpha$ changes whenever the step size or method order changes.

    To provide a simple example, consider the following harmonic oscillator problem:

\[
  \begin{split}
    u'' & = -k^2 u \\
    u (0) & = 0 \\
    u'(0) & = k
  \end{split}
\]

    We write it in terms of a first order ode:

\[
  \begin{matrix}
    y_0' & -y_1      & = 0 \\
    y_1' & + k^2 y_0 & = 0
  \end{matrix}
\]

That is $F(y', y, t) = y' + A y = 0$ where

\[
  A =
  \begin{pmatrix}
  0 & -1 \\
  k^2 & 0
  \end{pmatrix}
\]

and $y(0)=(0, k)$, $y'(0) = (k, 0)$.

The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t) = k \cos(k t)$, $y_1'(t) = -k^2 \sin(k t)$.

The Jacobian to assemble is the following: $J = \alpha I + A$.

    This is achieved by the following snippet of code:

    using VectorType = Vector<double>;
    VectorType y(2);
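(The remainder of the snippet is truncated in this diff. A sketch of a complete setup, wiring up the residual, setup_jacobian, and solve_with_jacobian callbacks documented below for this 2x2 problem; the solve_dae() driver call is an assumption about the wrapper's interface:)

#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/vector.h>
#include <deal.II/sundials/ida.h>

int main()
{
  using namespace dealii;
  using VectorType = Vector<double>;

  const double k = 1.0;

  SUNDIALS::IDA<VectorType> dae;

  // Residual F(t, y, y') = y' + A y with A = [[0,-1],[k^2,0]].
  dae.residual = [&](const double /*t*/,
                     const VectorType &y,
                     const VectorType &y_dot,
                     VectorType &res) {
    res[0] = y_dot[0] - y[1];
    res[1] = y_dot[1] + k * k * y[0];
  };

  // The Jacobian to assemble is J = alpha I + A.
  FullMatrix<double> J(2, 2);
  dae.setup_jacobian = [&](const double /*t*/,
                           const VectorType & /*y*/,
                           const VectorType & /*y_dot*/,
                           const double alpha) {
    J(0, 0) = alpha; J(0, 1) = -1;
    J(1, 0) = k * k; J(1, 1) = alpha;
  };

  // Solve J x = b with the 2x2 Jacobian assembled above.
  dae.solve_with_jacobian =
    [&](const VectorType &b, VectorType &x, const double /*tolerance*/) {
      FullMatrix<double> Jinv(2, 2);
      Jinv.invert(J);
      Jinv.vmult(x, b);
    };

  // Initial conditions y(0) = (0, k), y'(0) = (k, 0).
  VectorType y(2), y_dot(2);
  y[1]     = k;
  y_dot[0] = k;
  dae.solve_dae(y, y_dot); // assumed driver call
}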
    @@ -342,8 +342,8 @@
  • use_y_diff: compute the algebraic components of y and differential components of y_dot, given the differential components of y. This option requires that the user specifies differential and algebraic components in the function get_differential_components.
  • use_y_dot: compute all components of y, given y_dot.
By default, this class assumes that all components are differential, and that you want to solve a standard ODE. In this case, the initial component type is set to use_y_diff, so that the y_dot at time t=initial_time is computed by solving the nonlinear problem $F(y_dot, y(t0), t0) = 0$ in the variable y_dot.

    Notice that a Newton solver is used for this computation. The Newton solver parameters can be tweaked by acting on ic_alpha and ic_max_iter.

    If you reset the solver at some point, you may want to select a different computation for the initial conditions after reset. Say, for example, that you have refined a grid, and after transferring the solution to the new grid, the initial conditions are no longer consistent. Then you can choose how these are made consistent, using the same three options that you used for the initial conditions in reset_type.

Compute residual. Return $F(t, y, \dot y)$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, IDA can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 662 of file ida.h.
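For illustration, a residual callback for the harmonic oscillator above that also signals a recoverable failure to IDA might look as follows. This is a hedged sketch: the failure test is made up, and we assume the callback signature and exception type named in the note above:

time_stepper.residual = [&](const double /*t*/,
                            const VectorType &y,
                            const VectorType &y_dot,
                            VectorType &res) {
  // Hypothetical guard: report a *recoverable* problem instead of aborting,
  // so that IDA may retry (e.g., with a smaller step size):
  if (!std::isfinite(y[0]) || !std::isfinite(y[1]))
    throw RecoverableUserCallbackError();

  res[0] = y_dot[0] - y[1];
  res[1] = y_dot[1] + kappa * kappa * y[0];
};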


    Compute Jacobian. This function is called by IDA any time a Jacobian update is required. The user should compute the Jacobian (or update all the variables that allow the application of the Jacobian). This function is called by IDA once, before any call to solve_jacobian_system() or solve_with_jacobian().

    The Jacobian $J$ should be a (possibly inexact) computation of

\[
  J=\dfrac{\partial G}{\partial y} = \dfrac{\partial F}{\partial y} +
  \alpha \dfrac{\partial F}{\partial \dot y}.
\]

    If the user uses a matrix based computation of the Jacobian, then this is the right place where an assembly routine should be called to assemble both a matrix and a preconditioner for the Jacobian system. Subsequent calls (possibly more than one) to solve_jacobian_system() or solve_with_jacobian() can assume that this function has been called at least once.

Notice that no assumption is made by this interface on what the user should do in this function. IDA only assumes that after a call to setup_jacobian() it is possible to call solve_jacobian_system() or solve_with_jacobian() to obtain a solution $x$ to the system $J x = b$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, IDA can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 701 of file ida.h.
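For a matrix-based application, a sketch of this callback might assemble and factorize the Jacobian once per setup. The assemble_jacobian() helper and the two objects it fills are hypothetical stand-ins for the user's own code:

SparseMatrix<double> jacobian_matrix;   // user-owned; sparsity set up elsewhere
SparseDirectUMFPACK  jacobian_solver;

time_stepper.setup_jacobian = [&](const double t,
                                  const VectorType &y,
                                  const VectorType &y_dot,
                                  const double alpha) {
  // Assemble dF/dy + alpha * dF/dy' (problem specific, not shown here):
  assemble_jacobian(t, y, y_dot, alpha, jacobian_matrix);

  // Do the expensive work once; later solve calls reuse the factorization:
  jacobian_solver.initialize(jacobian_matrix);
};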


Solve the Jacobian linear system. This function will be called by IDA (possibly several times) after setup_jacobian() has been called at least once. IDA tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then IDA does not call setup_jacobian() again. If, on the contrary, internal IDA convergence tests fail, then IDA calls setup_jacobian() again with updated vectors and coefficients so that successive calls to solve_jacobian_system() lead to better convergence in the Newton process.

The Jacobian $J$ should be (an approximation of) the system Jacobian

\[
  J=\dfrac{\partial G}{\partial y} = \dfrac{\partial F}{\partial y} +
  \alpha \dfrac{\partial F}{\partial \dot y}.
\]

A call to this function should store in dst the result of $J^{-1}$ applied to src, i.e., J*dst = src. It is the user's responsibility to set up proper solvers and preconditioners inside this function.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, IDA can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    Deprecated
    Use solve_with_jacobian() instead which also uses a numerical tolerance.

Solve the Jacobian linear system up to a specified tolerance. This function will be called by IDA (possibly several times) after setup_jacobian() has been called at least once. IDA tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then IDA does not call setup_jacobian() again. If, on the contrary, internal IDA convergence tests fail, then IDA calls setup_jacobian() again with updated vectors and coefficients so that successive calls to solve_with_jacobian() lead to better convergence in the Newton process.

    The Jacobian $J$ should be (an approximation of) the system Jacobian

\[
  J=\dfrac{\partial G}{\partial y} = \dfrac{\partial F}{\partial y} +
  \alpha \dfrac{\partial F}{\partial \dot y}.
\]

    Arguments to the function are:

Parameters
  [in]  rhs        The system right hand side to solve for.
  [out] dst        The solution of $J^{-1} * src$.
  [in]  tolerance  The tolerance with which to solve the linear system of equations.
A call to this function should store in dst the result of $J^{-1}$ applied to src, i.e., the solution of the linear system J*dst = src. It is the user's responsibility to set up proper solvers and preconditioners either inside this function, or already within the setup_jacobian() function. (The latter is, for example, what the step-77 program does: All expensive operations happen in setup_jacobian(), given that that function is called far less often than the current one.)

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, IDA can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 781 of file ida.h.
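A sketch of a tolerance-aware implementation, assuming a jacobian_matrix assembled in setup_jacobian() as in the sketch further up; the GMRES/identity-preconditioner combination is just one possible choice:

time_stepper.solve_with_jacobian =
  [&](const VectorType &rhs, VectorType &dst, const double tolerance) {
    // Solve J*dst = rhs only as accurately as IDA currently needs:
    SolverControl           solver_control(1000, tolerance);
    SolverGMRES<VectorType> solver(solver_control);
    solver.solve(jacobian_matrix, dst, rhs, PreconditionIdentity());
  };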

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 2024-01-30 03:04:44.900818497 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 2024-01-30 03:04:44.900818497 +0000 @@ -551,8 +551,8 @@

    Type of correction for initial conditions.

If you do not provide consistent initial conditions (i.e., conditions for which $F(y_dot(0), y(0), 0) = 0$), you can ask SUNDIALS to compute initial conditions for you by using the ic_type parameter at construction time.

Notice that you could in principle use these capabilities to solve for steady state problems by setting y_dot to zero, and asking to compute $y(0)$ that satisfies $F(0, y(0), 0) = 0$; however, the nonlinear solver used inside IDA may not be robust enough for complex problems with several million unknowns.

    Definition at line 523 of file ida.h.
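Put together, selecting how initial conditions are made consistent could look like the following sketch (member and enum names as discussed above; the numerical values are illustrative only):

using IDA = SUNDIALS::IDA<Vector<double>>;

IDA::AdditionalData data;
data.ic_type     = IDA::AdditionalData::use_y_diff; // given y(0), solve for y_dot(0)
data.reset_type  = IDA::AdditionalData::use_y_dot;  // after reset(): given y_dot, recompute y
data.ic_alpha    = 0.16;                            // illustrative damping for the IC Newton solve
data.ic_max_iter = 50;                              // illustrative iteration cap

IDA time_stepper(data);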

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 2024-01-30 03:04:44.928818730 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 2024-01-30 03:04:44.928818730 +0000 @@ -175,48 +175,48 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    class SUNDIALS::KINSOL< VectorType >

    Interface to SUNDIALS' nonlinear solver (KINSOL).

KINSOL is a solver for nonlinear algebraic systems in residual form $F(u) = 0$ or fixed point form $G(u) = u$, where $u$ is a vector which we will assume to be in ${\mathbb R}^n$ or ${\mathbb C}^n$, but that may also have a block structure and may be distributed in parallel computations; the functions $F$ and $G$ satisfy $F,G:{\mathbb R}^n \to{\mathbb R}^n$ or $F,G:{\mathbb C}^n \to{\mathbb C}^n$. It includes a Newton-Krylov solver as well as Picard and fixed point solvers, both of which can be accelerated with Anderson acceleration. KINSOL is based on the previous Fortran package NKSOL of Brown and Saad. An example of using KINSOL can be found in the step-77 tutorial program.

    KINSOL's Newton solver employs the inexact Newton method. As this solver is intended mainly for large systems, the user is required to provide their own solver function.

    At the highest level, KINSOL implements the following iteration scheme:

• Set $u_0$ = an initial guess
• For $n = 0, 1, 2, \ldots$ until convergence do:
  • Solve $J(u_n)\delta_n = -F(u_n)$
  • Set $u_{n+1} = u_n + \lambda \delta_n, \; 0 < \lambda \leq 1$
  • Test for convergence
Here, $u_n$ is the $n$-th iterate to $u$, and $J(u) = \nabla_u F(u)$ is the system Jacobian. At each stage in the iteration process, a scalar multiple of the step $\delta_n$ is added to $u_n$ to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

      Unless specified otherwise by the user, KINSOL strives to update Jacobian information as infrequently as possible to balance the high costs of matrix operations against other costs. Specifically, these updates occur when:

• the problem is initialized,
• $\|\lambda \delta_{n-1} \|_{D_u,\infty} \geq 1.5$ (inexact Newton only, see below for a definition of $\| \cdot \|_{D_u,\infty}$),
• a specified number of nonlinear iterations have passed since the last update,
• the linear solver failed recoverably with outdated Jacobian information,
• the global strategy failed with outdated Jacobian information, or
• $\|\lambda \delta_{n} \|_{D_u,\infty} \leq$ tolerance with outdated Jacobian information.

      KINSOL allows changes to the above strategy through optional solver inputs. The user can disable the initial Jacobian information evaluation or change the default value of the number of nonlinear iterations after which a Jacobian information update is enforced.

To address the case of ill-conditioned nonlinear systems, KINSOL allows prescribing scaling factors both for the solution vector and for the residual vector. For scaling to be used, the user may supply the function get_solution_scaling(), which returns values $D_u$ that are diagonal elements of the scaling matrix such that $D_u u_n$ has all components roughly the same magnitude when $u_n$ is close to a solution, and get_function_scaling(), which supplies values $D_F$ that are diagonal scaling matrix elements such that $D_F F$ has all components roughly the same magnitude when $u_n$ is not too close to a solution.

      When scaling values are provided for the solution vector, these values are automatically incorporated into the calculation of the perturbations used for the default difference quotient approximations for Jacobian information if the user does not supply a Jacobian solver through the solve_with_jacobian() function.

Two methods of applying a computed step $\delta_n$ to the previously computed solution vector are implemented. The first and simplest is the standard Newton strategy which applies the update with a constant $\lambda$ always set to 1. The other method is a global strategy, which attempts to use the direction implied by $\delta_n$ in the most efficient way for furthering convergence of the nonlinear problem. This technique is implemented in the second strategy, called Linesearch. This option employs both the $\alpha$ and $\beta$ conditions of the Goldstein-Armijo linesearch algorithm given in [DennisSchnabel96], where $\lambda$ is chosen to guarantee a sufficient decrease in $F$ relative to the step length as well as a minimum step length relative to the initial rate of decrease of $F$. One property of the algorithm is that the full Newton step tends to be taken close to the solution.

      The basic fixed-point iteration scheme implemented in KINSOL is given by:

• Set $u_0 =$ an initial guess
• For $n = 0, 1, 2, \dots$ until convergence do:
  • Set $u_{n+1} = G(u_n)$
  • Test for convergence
At each stage in the iteration process, function $G$ is applied to the current iterate to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

For Picard iteration, as implemented in KINSOL, we consider a special form of the nonlinear function $F$, such that $F(u) = Lu - N(u)$, where $L$ is a constant nonsingular matrix and $N$ is (in general) nonlinear.

Then the fixed-point function $G$ is defined as $G(u) = u - L^{-1}F(u)$. Within each iteration, the Picard step is computed then added to $u_n$ to produce the new iterate. Next, the nonlinear residual function is evaluated at the new iterate, and convergence is checked. The Picard and fixed point methods can be significantly accelerated using Anderson's acceleration method.

        The user has to provide the implementation of the following std::functions:

        • reinit_vector; and only one of
        • residual; or
        • iteration_function;
        -

        Specifying residual() allows the user to use Newton and Picard strategies (i.e., $F(u)=0$ will be solved), while specifying iteration_function(), a fixed point iteration will be used (i.e., $G(u)=u$ will be solved). An error will be thrown if iteration_function() is set for Picard or Newton.

        +

        Specifying residual() allows the user to use Newton and Picard strategies (i.e., $F(u)=0$ will be solved), while specifying iteration_function(), a fixed point iteration will be used (i.e., $G(u)=u$ will be solved). An error will be thrown if iteration_function() is set for Picard or Newton.

        If the use of a Newton or Picard method is desired, then the user should also supply

        • solve_jacobian_system or solve_with_jacobian; and optionally
        • setup_jacobian;
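A compact sketch of a Newton solve with these callbacks, for the scalar toy residual $F(u) = u^2 - 2$ (callback and enum names are the ones listed above; everything else is illustrative):

using VectorType = Vector<double>;

SUNDIALS::KINSOL<VectorType>::AdditionalData data;
data.strategy = SUNDIALS::KINSOL<VectorType>::AdditionalData::newton;

SUNDIALS::KINSOL<VectorType> nonlinear_solver(data);

nonlinear_solver.reinit_vector = [](VectorType &v) { v.reinit(1); };

nonlinear_solver.residual = [](const VectorType &u, VectorType &F) {
  F[0] = u[0] * u[0] - 2.0;            // F(u) = u^2 - 2
};

double jacobian = 1.0;                 // J = dF/du, updated in setup_jacobian
nonlinear_solver.setup_jacobian = [&](const VectorType &current_u,
                                      const VectorType & /*current_f*/) {
  jacobian = 2.0 * current_u[0];
};

nonlinear_solver.solve_with_jacobian =
  [&](const VectorType &rhs, VectorType &dst, const double /*tolerance*/) {
    dst[0] = rhs[0] / jacobian;        // apply J^{-1} for the 1x1 "system"
  };

VectorType u(1);
u[0] = 1.0;                            // initial guess
nonlinear_solver.solve(u);             // converges to sqrt(2)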

          A function object that users may supply and that is intended to prepare the linear solver for subsequent calls to solve_with_jacobian().

          The job of setup_jacobian() is to prepare the linear solver for subsequent calls to solve_with_jacobian(), in the solution of linear systems $Ax = b$. The exact nature of this system depends on the SolutionStrategy that has been selected.

In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$. If strategy = SolutionStrategy::fixed_point, then linear systems do not arise, and this function is never called.

          The setup_jacobian() function may call a user-supplied function, or a function within the linear solver module, to compute Jacobian-related data that is required by the linear solver. It may also preprocess that data as needed for solve_with_jacobian(), which may involve calling a generic function (such as for LU factorization) or, more generally, build preconditioners from the assembled Jacobian. In any case, the data so generated may then be used whenever a linear system is solved.

The point of this function is that the setup_jacobian() function is not called at every Newton iteration, but only as frequently as the solver determines that it is appropriate to perform the setup task. In this way, Jacobian-related data generated by setup_jacobian() is expected to be used over a number of Newton iterations. KINSOL determines itself when it is beneficial to regenerate the Jacobian and associated information (such as preconditioners computed for the Jacobian), thereby saving the effort to regenerate the Jacobian matrix and a preconditioner for it whenever possible.

Parameters
  current_u  Current value of $u$
  current_f  Current value of $F(u)$ or $G(u)$
          Deprecated
          Versions of SUNDIALS after 4.0 no longer provide all of the information necessary for this callback (see below). Use the solve_with_jacobian callback described below.

          A function object that users may supply and that is intended to solve a linear system with the Jacobian matrix. This function will be called by KINSOL (possibly several times) after setup_jacobian() has been called at least once. KINSOL tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then KINSOL does not call setup_jacobian() again. If, on the contrary, internal KINSOL convergence tests fail, then KINSOL calls setup_jacobian() again with updated vectors and coefficients so that successive calls to solve_jacobian_system() lead to better convergence in the Newton process.

          If you do not specify a solve_jacobian_system or solve_with_jacobian function, then only a fixed point iteration strategy can be used. Notice that this may not converge, or may converge very slowly.

A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above).

          Arguments to the function are:

Parameters
  [in]  ycur  The current $y$ vector for the current KINSOL internal step. In the documentation above, this $y$ vector is generally denoted by $u$.
  [in]  fcur  The current value of the implicit right-hand side at ycur, $f_I (t_n, ypred)$.
  [in]  rhs   The system right hand side to solve for.
  [out] dst   The solution of $J^{-1} * src$.

    A function object that users may supply and that is intended to solve a linear system with the Jacobian matrix. This function will be called by KINSOL (possibly several times) after setup_jacobian() has been called at least once. KINSOL tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then KINSOL does not call setup_jacobian() again. If, on the contrary, internal KINSOL convergence tests fail, then KINSOL calls setup_jacobian() again with updated vectors and coefficients so that successive calls to solve_with_jacobian() lead to better convergence in the Newton process.

    If you do not specify a solve_with_jacobian function, then only a fixed point iteration strategy can be used. Notice that this may not converge, or may converge very slowly.

A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above). The function attached to this callback is also provided with a tolerance to the linear solver, indicating that it is not necessary to solve the linear system with the Jacobian matrix exactly, but only to a tolerance that KINSOL will adapt over time.

    Arguments to the function are:

Parameters
  [in]  rhs        The system right hand side to solve for.
  [out] dst        The solution of $J^{-1} * src$.
  [in]  tolerance  The tolerance with which to solve the linear system of equations.

    A function object that users may supply and that is intended to return a vector whose components are the weights used by KINSOL to compute the vector norm of the solution. The implementation of this function is optional, and it is used only if implemented.

The intent for this scaling factor is for problems in which the different components of a solution have vastly different numerical magnitudes – typically because they have different physical units and represent different things. For example, if one were to solve a nonlinear Stokes problem, the solution vector has components that correspond to velocities and other components that correspond to pressures. These have different physical units and depending on which units one chooses, they may have roughly comparable numerical sizes or maybe they don't. To give just one example, in simulations of flow in the Earth's interior, one has velocities on the order of maybe ten centimeters per year, and pressures up to around 100 GPa. If one expresses this in SI units, this corresponds to velocities of around $0.000,000,003=3 \times 10^{-9}$ m/s, and pressures around $10^9 \text{kg}/\text{m}/\text{s}^2$, i.e., vastly different. In such cases, computing the $l_2$ norm of a solution-type vector (e.g., the difference between the previous and the current solution) makes no sense because the norm will either be dominated by the velocity components or the pressure components. The scaling vector this function returns is intended to provide each component of the solution with a scaling factor that is generally chosen as the inverse of a "typical velocity" or "typical pressure" so that upon multiplication of a vector component by the corresponding scaling vector component, one obtains a number that is of order of magnitude of one (i.e., a reasonably small multiple of one times the typical velocity/pressure).

    If no function is provided to a KINSOL object, then this is interpreted as implicitly saying that all of these scaling factors should be considered as one.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    A function object that users may supply and that is intended to return a vector whose components are the weights used by KINSOL to compute the vector norm of the function evaluation away from the solution. The implementation of this function is optional, and it is used only if implemented.

The point of this function and the scaling vector it returns is similar to the one discussed above for get_solution_scaling, except that it is for a vector that scales the components of the function $F(U)$, rather than the components of $U$, when computing norms. As above, if no function is provided, then this is equivalent to using a scaling vector whose components are all equal to one.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 691 of file kinsol.h.
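In code, supplying the two scaling callbacks might look like this sketch for a two-component velocity/pressure system in the spirit of the Stokes discussion above. The magnitudes are illustrative only, and we assume the callbacks return a reference to a vector kept alive by the user:

VectorType solution_scale(2);
VectorType function_scale(2);
solution_scale[0] = 1e9;          // inverse of a "typical velocity" of 1e-9 m/s
solution_scale[1] = 1e-9;         // inverse of a "typical pressure" of 1e9 Pa
function_scale = solution_scale;  // often chosen analogously; problem dependent

nonlinear_solver.get_solution_scaling = [&]() -> VectorType & {
  return solution_scale;
};
nonlinear_solver.get_function_scaling = [&]() -> VectorType & {
  return function_scale;
};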

    /usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 2024-01-30 03:04:45.008819397 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 2024-01-30 03:04:45.008819397 +0000 @@ -362,15 +362,15 @@

    Detailed Description

    template<typename NumberType>
    class ScaLAPACKMatrix< NumberType >

    A wrapper class around ScaLAPACK parallel dense linear algebra.

ScaLAPACK assumes that matrices are distributed according to the block-cyclic decomposition scheme. An $M$ by $N$ matrix is first decomposed into $\lceil M / MB \rceil$ by $\lceil N / NB \rceil$ blocks which are then uniformly distributed across the 2d process grid with $p q \le Np$ processes, where $p,q$ are grid dimensions and $Np$ is the total number of processes. The parameters MB and NB are referred to as row and column block size and determine the granularity of the block-cyclic distribution.

In the following the block-cyclic distribution of a $10 \times 9$ matrix onto a $3\times 3$ Cartesian process grid with block sizes $\text{MB}=\text{NB}=2$ is displayed.

    Block-Cyclic Distribution
Note that the odd number of columns of the local matrices owned by the processes P2, P5 and P8 accounts for $N=9$ not being an integral multiple of $\text{NB}=2$.

    The choice of the block sizes is a compromise between a sufficiently large size for efficient local/serial BLAS, but one that is also small enough to achieve good parallel load balance.

    Below we show a strong scaling example of ScaLAPACKMatrix::invert() on up to 5 nodes each composed of two Intel Xeon 2660v2 IvyBridge sockets 2.20GHz, 10 cores/socket. Calculations are performed on square processor grids 1x1, 2x2, 3x3, 4x4, 5x5, 6x6, 7x7, 8x8, 9x9, 10x10.


    Constructor for a rectangular matrix with n_rows and n_cols and distributed using the grid process_grid.

The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 81 of file scalapack.cc.
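For instance, a sketch of creating such a matrix; the grid and matrix sizes are illustrative, and we assume the ProcessGrid constructor that derives the grid dimensions from the matrix and block sizes:

const unsigned int n_rows = 1000, n_cols = 800;
const unsigned int row_block_size = 32, column_block_size = 32; // powers of 2

// A 2d process grid chosen to fit the matrix and block sizes:
const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
  MPI_COMM_WORLD, n_rows, n_cols, row_block_size, column_block_size);

ScaLAPACKMatrix<double> A(n_rows, n_cols, grid,
                          row_block_size, column_block_size);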


    Constructor for a square matrix of size size, and distributed using the process grid in process_grid.

The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 106 of file scalapack.cc.


    Constructor for a general rectangular matrix that is read from the file filename and distributed using the grid process_grid.

    Loads the matrix from file filename using HDF5. In case that deal.II was built without HDF5 a call to this function will cause an exception to be thrown.

The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 122 of file scalapack.cc.


    Initialize the rectangular matrix with n_rows and n_cols and distributed using the grid process_grid.

The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 217 of file scalapack.cc.


    Initialize the square matrix of size size and distributed using the grid process_grid.

The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 291 of file scalapack.cc.

Transposing assignment: $\mathbf{A} = \mathbf{B}^T$

The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    Definition at line 981 of file scalapack.cc.

transpose_B | Block Sizes              | Operation
false       | $MB_A=MB_B$, $NB_A=NB_B$ | $\mathbf{A} = a \mathbf{A} + b \mathbf{B}$
true        | $MB_A=NB_B$, $NB_A=MB_B$ | $\mathbf{A} = a \mathbf{A} + b \mathbf{B}^T$

The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    Definition at line 991 of file scalapack.cc.


Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}$

The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

The following alignment conditions have to be fulfilled: $MB_A=MB_B$ and $NB_A=NB_B$.

    Definition at line 1047 of file scalapack.cc.


Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}^T$

The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    Definition at line 1057 of file scalapack.cc.

transpose_A | transpose_B | Block Sizes                           | Operation
false       | false       | $MB_A=MB_C$, $NB_A=MB_B$, $NB_B=NB_C$ | $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B} + c \mathbf{C}$
false       | true        | $MB_A=MB_C$, $NB_A=NB_B$, $MB_B=NB_C$ | $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B}^T + c \mathbf{C}$
true        | false       | $MB_A=MB_B$, $NB_A=MB_C$, $NB_B=NB_C$ | $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B} + c \mathbf{C}$
true        | true        | $MB_A=NB_B$, $NB_A=MB_C$, $MB_B=NB_C$ | $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B}^T + c \mathbf{C}$

It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

The matrices $\mathbf{A}$, $\mathbf{B}$ and $\mathbf{C}$ must have the same process grid.

    Definition at line 1067 of file scalapack.cc.


    Matrix-matrix-multiplication.

The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A} \cdot \mathbf{B}$

else $\mathbf{C} = \mathbf{A} \cdot \mathbf{B}$

It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

The following alignment conditions have to be fulfilled: $MB_A=MB_C$, $NB_A=MB_B$ and $NB_B=NB_C$.

    Definition at line 1184 of file scalapack.cc.
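Usage then reduces to one call per operation, e.g. (matrices assumed to be set up on a common process grid with matching block sizes, as required above):

A.mmult(C, B);                   // C = A * B
A.mmult(C, B, /*adding=*/true);  // C = C + A * B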


    Matrix-matrix-multiplication using transpose of $\mathbf{A}$.

The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A}^T \cdot \mathbf{B}$

else $\mathbf{C} = \mathbf{A}^T \cdot \mathbf{B}$

It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

The following alignment conditions have to be fulfilled: $MB_A=MB_B$, $NB_A=MB_C$ and $NB_B=NB_C$.

    Definition at line 1198 of file scalapack.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 2024-01-30 03:04:45.056819796 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 2024-01-30 03:04:45.056819796 +0000 @@ -259,7 +259,7 @@
    Vector<double> solution_1d;
We will denote this solution function described by this DoFHandler and vector object by $u_h(x)$ where $x$ is a vector with just one component, and consequently is not shown in boldface. Then assume that we want this $u_h(x)$ to be used as a boundary condition for a 2d problem at the line $y=0$. Let's say that this line corresponds to boundary indicator 123. If we say that the 2d problem is associated with

DoFHandler<2> dof_handler_2d;

    then in order to evaluate the boundary conditions for this 2d problem, we would want to call VectorTools::interpolate_boundary_values() via

AffineConstraints<double> boundary_values_2d;
VectorTools::interpolate_boundary_values (dof_handler_2d,
                                          123,
                                          ...,
                                          boundary_values_2d);
    void interpolate_boundary_values(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > * > &function_map, std::map< types::global_dof_index, number > &boundary_values, const ComponentMask &component_mask=ComponentMask())
The question here is what to use as the Function object that can be passed as third argument. It needs to be a Function<2> object, i.e., it receives a 2d input point and is supposed to return the value at that point. What we want it to do is to just take the $x$ component of the input point and evaluate the 1d solution at that point, knowing that at the boundary with indicator 123, the $y$ component of the input point must be zero. This all can be achieved via the following function object:

Functions::FEFieldFunction<1> solution_1d_as_function_object (dof_handler_1d, solution_1d);
auto boundary_evaluator
  = [&] (const Point<2> &p)
    { return solution_1d_as_function_object.value (Point<1>(p[0])); };
/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 2024-01-30 03:04:45.084820030 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 2024-01-30 03:04:45.084820030 +0000 @@ -212,7 +212,7 @@

\[ \rho^{(k)} \dealcoloneq \frac{1}{y^{(k)} \cdot s^{(k)}} \]

for a symmetric positive definite $H$. The limited memory variant is implemented via the two-loop recursion.

    Definition at line 58 of file solver_bfgs.h.

    Member Typedef Documentation


    starting from initial state x.

The function compute takes two arguments indicating the values of $x$ and of the gradient $g=\nabla f(\mathbf x)=\frac{\partial f}{\partial \mathbf x}$. When called, it needs to update the gradient $g$ at the given location $x$ and return the value of the function being minimized, i.e., $f(\mathbf x)$.
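A sketch of such a compute callback for the strictly convex quadratic $f(\mathbf x) = \frac 12 \|\mathbf x\|^2$, whose gradient is $g = \mathbf x$; we assume x and g are passed by reference and the function value is returned, as described above:

using VectorType = Vector<double>;

SolverControl          solver_control(100, 1e-10);
SolverBFGS<VectorType> bfgs(solver_control);

VectorType x(10);
x = 1.0;  // arbitrary starting point

bfgs.solve([](VectorType &x, VectorType &g) -> double {
             g = x;                 // gradient of 1/2 |x|^2
             return 0.5 * (x * x);  // function value f(x)
           },
           x);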


    Connect a slot to perform a custom line-search.

Given the value of function f, the current value of unknown x, the gradient g and the search direction p, return the size $\alpha$ of the step $x \leftarrow x + \alpha p$, and update x, g and f accordingly.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 2024-01-30 03:04:45.112820263 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 2024-01-30 03:04:45.116820296 +0000 @@ -196,10 +196,10 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
class SolverFIRE< VectorType >

FIRE (Fast Inertial Relaxation Engine) for minimization of (potentially non-linear) objective function $E(\mathbf x)$, where $\mathbf x$ is a vector of $n$ variables ($n$ is the number of variables of the objective function). Like all other solver classes, it can work on any kind of vector and matrix as long as they satisfy certain requirements (for the requirements on matrices and vectors in order to work with this class, see the documentation of the Solver base class). The type of the solution vector must be passed as template argument, and defaults to Vector<double>.

FIRE is a damped dynamics method described in Structural Relaxation Made Simple by Bitzek et al. 2006, typically used to find stable equilibrium configurations of atomistic systems in computational material science. Starting from a given initial configuration of the atomistic system, the algorithm relies on inertia to obtain the (nearest) configuration with least potential energy.

    Notation:

Given initial values for $\Delta t$, $\alpha = \alpha_0$, $\epsilon$, $\mathbf x = \mathbf x_0$ and $\mathbf v= \mathbf 0$ along with a given mass matrix $\mathbf M$, the FIRE algorithm is as follows:

1. Calculate $\mathbf g = \nabla E(\mathbf x)$ and check for convergence ($\mathbf g \cdot \mathbf g < \epsilon^2$).
2. Update $\mathbf x$ and $\mathbf v$ using a simple (forward) Euler integration step,
   $\mathbf x = \mathbf x + \Delta t \mathbf v$,
   $\mathbf v = \mathbf v + \Delta t \mathbf M^{-1} \cdot \mathbf g$.
3. Calculate $p = \mathbf g \cdot \mathbf v$.
4. Set $\mathbf v = (1-\alpha) \mathbf v + \alpha \frac{|\mathbf v|}{|\mathbf g|} \mathbf g$.
5. If $p<0$ and the number of steps since $p$ was last negative is larger than a certain value, then increase the time step $\Delta t$ and decrease $\alpha$.
6. If $p>0$, then decrease the time step, freeze the system, i.e., set $\mathbf v = \mathbf 0$, and reset $\alpha = \alpha_0$.
7. Return to 1.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html 2024-01-30 03:04:45.144820530 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html 2024-01-30 03:04:45.144820530 +0000 @@ -218,7 +218,7 @@

      Detailed Description

      template<typename VectorType = Vector<double>>
      class SolverFlexibleCG< VectorType >

This class implements a flexible variant of the conjugate gradient method, which is based on a different formula to compute $\beta_k$ in the process of constructing a new search direction that is A-orthogonal against the previous one. Rather than using the Fletcher–Reeves update formula with $\beta_k = \frac{\mathbf{r}^T_{k+1} \mathbf{z}_{k+1}}{\mathbf{r}^T_{k} \mathbf{z}_{k}}$ for computing the new search direction (here $\mathbf{r}_{k+1}$ is the residual in step $k+1$ and $\mathbf{z}_{k+1} = P^{-1} \mathbf{r}_{k+1}$) as in the classical conjugate gradient algorithm, this class selects the Polak-Ribiere formula $\beta_k = \frac{\mathbf{r}^T_{k+1} \left(\mathbf{z}_{k+1} - \mathbf{z}_{k}\right)}{\mathbf{r}^T_{k} \mathbf{z}_{k}}$. The additional term $\mathbf{r}^T_{k+1} \mathbf{z}_{k}$ is zero for linear symmetric-positive definite preconditioners due to the construction of the search directions, so the behavior of SolverFlexibleCG is equivalent for those kinds of situations and merely increases costs by requiring an additional stored vector and associated vector operations. While there are no theoretical guarantees for convergence as in the classical CG algorithm, the current class has been documented to be much more robust for variable preconditioners (e.g., involving some iterative inverse that is not fully converged) or a preconditioner with some slight non-symmetry (like weighted Schwarz methods), which results from the local optimality of the search direction with at least as good progress as the locally optimal steepest descent method.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 2024-01-30 03:04:45.176820796 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 2024-01-30 03:04:45.176820796 +0000 @@ -427,7 +427,7 @@ const PreconditionerType & preconditioner&#href_anchor"memdoc"> -

      Solve $A^Tx=b$ for $x$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseBlockVanka.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseBlockVanka.html 2024-01-30 03:04:45.204821029 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseBlockVanka.html 2024-01-30 03:04:45.204821029 +0000 @@ -435,7 +435,7 @@

    Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    Note
    This function should only be called if the preconditioner has been initialized.
    @@ -464,7 +464,7 @@

    Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    Note
    This function should only be called if the preconditioner has been initialized.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html 2024-01-30 03:04:45.236821296 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html 2024-01-30 03:04:45.236821296 +0000 @@ -531,7 +531,7 @@

    Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    Definition at line 832 of file sparse_direct.cc.

    @@ -551,7 +551,7 @@

    Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    Definition at line 839 of file sparse_direct.cc.

    @@ -576,7 +576,7 @@

    The solution will be returned in place of the right hand side vector.

    Parameters
    [in,out] rhs_and_solution   A vector that contains the right hand side $b$ of a linear system $Ax=b$ upon calling this function, and that contains the solution $x$ of the linear system after calling this function.
    [in]     transpose          If set to true, this function solves the linear system $A^T x = b$ instead of $Ax=b$.
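
    Given the in-place convention, a typical call looks like this minimal sketch (assuming the matrix and right hand side are assembled elsewhere):

      #include <deal.II/lac/sparse_direct.h>
      #include <deal.II/lac/sparse_matrix.h>
      #include <deal.II/lac/vector.h>

      using namespace dealii;

      // Factorize A once, then overwrite the right hand side with the solution.
      void direct_solve(const SparseMatrix<double> &A,
                        Vector<double>             &rhs_and_solution)
      {
        SparseDirectUMFPACK umfpack;
        umfpack.initialize(A);           // compute the LU factorization
        umfpack.solve(rhs_and_solution); // b is replaced by x = A^{-1} b
      }
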
    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 2024-01-30 03:04:45.308821896 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 2024-01-30 03:04:45.308821896 +0000 @@ -873,7 +873,7 @@

    Return the dimension of the codomain (or range) space. It calls the inherited SparseMatrix::m() function. Note that the matrix is of dimension $m \times n$.

    @@ -1901,7 +1901,7 @@

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by SparsityPattern::symmetrize().

    @@ -2153,7 +2153,7 @@

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
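
    For instance, with the assembled mass matrix this yields the squared $L_2$ norm of a finite element function directly from its nodal vector (a sketch, assuming both objects are set up elsewhere):

      #include <deal.II/lac/sparse_matrix.h>
      #include <deal.II/lac/vector.h>

      using namespace dealii;

      // Squared L2 norm of the FE function with nodal values v: (v, M v).
      double l2_norm_squared(const SparseMatrix<double> &mass_matrix,
                             const Vector<double>       &v)
      {
        return mass_matrix.matrix_norm_square(v);
      }
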
    @@ -2186,7 +2186,7 @@

    Compute the matrix scalar product $\left(u,Mv\right)$.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2263,7 +2263,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.
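
    A calling-convention sketch (assuming A and B are assembled and the caller keeps pattern_C alive at least as long as C):

      #include <deal.II/lac/sparse_matrix.h>
      #include <deal.II/lac/sparsity_pattern.h>

      using namespace dealii;

      // Form C = A * B; with the default last argument of mmult(), the
      // sparsity pattern associated with C is rebuilt to hold the product.
      void multiply(const SparseMatrix<double> &A,
                    const SparseMatrix<double> &B,
                    SparsityPattern            &pattern_C,
                    SparseMatrix<double>       &C)
      {
        C.reinit(pattern_C); // C must already be associated with a pattern
        A.mmult(C, B);       // pattern_C is reset, then the product is computed
      }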

    @@ -2334,8 +2334,8 @@

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -2363,8 +2363,8 @@

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)
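
    As a concrete instance of the two norms (an illustrative example): for $M = \begin{pmatrix} 1 & -2 \\ 3 & 4 \end{pmatrix}$ the absolute column sums are $1+3=4$ and $2+4=6$, so $|M|_1 = 6$, while the absolute row sums are $1+2=3$ and $3+4=7$, so $|M|_\infty = 7$.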

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 2024-01-30 03:04:45.384822530 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 2024-01-30 03:04:45.384822530 +0000 @@ -761,7 +761,7 @@

    Return the dimension of the codomain (or range) space. It calls the inherited SparseMatrix::m() function. Note that the matrix is of dimension $m \times n$.

    @@ -1704,7 +1704,7 @@

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by SparsityPattern::symmetrize().

    @@ -2049,7 +2049,7 @@

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2082,7 +2082,7 @@

    Compute the matrix scalar product $\left(u,Mv\right)$.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2159,7 +2159,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -2230,8 +2230,8 @@

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -2259,8 +2259,8 @@

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 2024-01-30 03:04:45.460823162 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 2024-01-30 03:04:45.460823162 +0000 @@ -426,8 +426,8 @@
    template<typename number>
    class SparseMIC< number >

    Implementation of the Modified Incomplete Cholesky (MIC(0)) preconditioner for symmetric matrices. This class conforms to the state and usage specification in SparseLUDecomposition.

    The decomposition

    Let a symmetric, positive-definite, sparse matrix $A$ be in the form $A = D - L - L^T$, where $D$ is the diagonal part of $A$ and $-L$ is a strictly lower triangular matrix. The MIC(0) decomposition of the matrix $A$ is defined by $B = (X-L)X^{-1}(X-L^T)$, where $X$ is a diagonal matrix defined by the condition $\text{rowsum}(A) = \text{rowsum}(B)$.

    Definition at line 46 of file sparse_mic.h.

    Member Typedef Documentation

    @@ -938,7 +938,7 @@

    Return the dimension of the codomain (or range) space. It calls the inherited SparseMatrix::m() function. Note that the matrix is of dimension $m \times n$.

    @@ -1966,7 +1966,7 @@

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by SparsityPattern::symmetrize().

    @@ -2218,7 +2218,7 @@

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2251,7 +2251,7 @@

    Compute the matrix scalar product $\left(u,Mv\right)$.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2328,7 +2328,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -2399,8 +2399,8 @@

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -2428,8 +2428,8 @@

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 2024-01-30 03:04:45.532823763 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 2024-01-30 03:04:45.532823763 +0000 @@ -917,7 +917,7 @@

    Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    @@ -937,7 +937,7 @@

    Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    @@ -1452,7 +1452,7 @@

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by SparsityPattern::symmetrize().

    @@ -1812,7 +1812,7 @@

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -1838,7 +1838,7 @@

    Compute the matrix scalar product $\left(u,Mv\right)$.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -1901,7 +1901,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -1957,8 +1957,8 @@

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -1978,8 +1978,8 @@

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 2024-01-30 03:04:45.584824195 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 2024-01-30 03:04:45.584824195 +0000 @@ -650,7 +650,7 @@

    Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    Definition at line 1094 of file sparse_matrix_ez.h.

    @@ -680,7 +680,7 @@

    Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    Definition at line 1102 of file sparse_matrix_ez.h.

    @@ -1206,7 +1206,7 @@

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    @@ -1229,7 +1229,7 @@

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult but takes the transposed matrix.

    @@ -1252,7 +1252,7 @@

    Adding Matrix-vector multiplication. Add $M*src$ to $dst$ with $M$ being this matrix.

    @@ -1275,7 +1275,7 @@

    Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add but takes the transposed matrix.
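
    Taken together, the four multiplication variants follow one naming scheme, as in this sketch (assuming a square, initialized matrix and vectors of matching size):

      #include <deal.II/lac/sparse_matrix_ez.h>
      #include <deal.II/lac/vector.h>

      using namespace dealii;

      void apply_variants(const SparseMatrixEZ<double> &M,
                          Vector<double> &dst, const Vector<double> &src)
      {
        M.vmult(dst, src);      // dst  = M   * src
        M.Tvmult(dst, src);     // dst  = M^T * src
        M.vmult_add(dst, src);  // dst += M   * src
        M.Tvmult_add(dst, src); // dst += M^T * src
      }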

    @@ -1384,7 +1384,7 @@

    Apply SOR preconditioning matrix to src. The result of this method is $dst = (om D - L)^{-1} src$.

    @@ -1412,7 +1412,7 @@

    Apply transpose SOR preconditioning matrix to src. The result of this method is $dst = (om D - U)^{-1} src$.

    @@ -1447,7 +1447,7 @@

    Add the matrix A conjugated by B, that is, $B A B^T$ to this object. If the parameter transpose is true, compute $B^T A B$.

    This function requires that B has a const_iterator traversing all matrix entries and that A has a function el(i,j) for access to a specific entry.

    Definition at line 1461 of file sparse_matrix_ez.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 2024-01-30 03:04:45.608824396 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 2024-01-30 03:04:45.608824396 +0000 @@ -143,7 +143,7 @@

    The typical use for these iterators is to iterate over the elements of a sparse matrix or over the elements of individual rows. Note that there is no guarantee that the elements of a row are actually traversed in an order in which columns monotonically increase. See the documentation of the SparsityPattern class for more information.

    The first template argument denotes the underlying numeric type, the second the constness of the matrix.

    Since there is a specialization of this class for Constness=false, this class is for iterators to constant matrices.

    Note
    This class operates directly on the internal data structures of the SparsityPattern and SparseMatrix classes. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index and the value of an entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ rows). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparse matrix at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices and values, whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.
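
    The recommended loop structure therefore looks like the following sketch (assuming an initialized matrix; do_something stands in for whatever the algorithm does with each entry):

      #include <deal.II/lac/sparse_matrix.h>

      using namespace dealii;

      template <typename Callback>
      void visit_entries(const SparseMatrix<double> &matrix, Callback do_something)
      {
        for (SparseMatrix<double>::size_type row = 0; row < matrix.m(); ++row)
          // The row index comes from the outer loop, so the expensive
          // row-index lookup through the iterator is never needed.
          for (auto it = matrix.begin(row); it != matrix.end(row); ++it)
            do_something(row, it->column(), it->value());
      }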

    Definition at line 347 of file sparse_matrix.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseVanka.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseVanka.html 2024-01-30 03:04:45.628824563 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseVanka.html 2024-01-30 03:04:45.628824563 +0000 @@ -373,7 +373,7 @@

    Return the dimension of the codomain (or range) space. Note that the matrix is of dimension $m \times n$.

    Note
    This function should only be called if the preconditioner has been initialized.
    @@ -394,7 +394,7 @@

    Return the dimension of the domain space. Note that the matrix is of dimension $m \times n$.

    Note
    This function should only be called if the preconditioner has been initialized.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 2024-01-30 03:04:45.676824963 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 2024-01-30 03:04:45.676824963 +0000 @@ -1161,7 +1161,7 @@

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$, a diagonal matrix has bandwidth 0, and there are at most $2*q+1$ entries per row if the bandwidth is $q$. The returned quantity is sometimes called "half bandwidth" in the literature.
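
    For instance (an illustrative example): a tridiagonal matrix has nonzero entries only for index pairs with $|i-j|\le 1$, so its (half) bandwidth is $q=1$ and each row holds at most $2*1+1=3$ entries.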

    Definition at line 674 of file sparsity_pattern.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2024-01-30 03:04:45.704825195 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2024-01-30 03:04:45.704825195 +0000 @@ -160,7 +160,7 @@

    Detailed Description

    An iterator class for walking over the elements of a sparsity pattern.

    The typical use for these iterators is to iterate over the elements of a sparsity pattern (or, since they also serve as the basis for iterating over the elements of an associated matrix, over the elements of a sparse matrix), or over the elements of individual rows. There is no guarantee that the elements of a row are actually traversed in an order in which column numbers monotonically increase. See the documentation of the SparsityPattern class for more information.

    Note
    This class operates directly on the internal data structures of the SparsityPattern class. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index of the sparsity pattern entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ rows). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparsity pattern at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.

    Definition at line 280 of file sparsity_pattern.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 2024-01-30 03:04:45.752825595 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 2024-01-30 03:04:45.752825595 +0000 @@ -219,20 +219,20 @@ class SphericalManifold< dim, spacedim >

    Manifold description for a spherical space coordinate system.

    You can use this Manifold object to describe any sphere, circle, hypersphere or hyperdisc in two or three dimensions. This manifold can be used as a co-dimension one manifold descriptor of a spherical surface embedded in a higher dimensional space, or as a co-dimension zero manifold descriptor for a body with positive volume, provided that the center of the spherical space is excluded from the domain. An example for the use of this function would be in the description of a hyper-shell or hyper-ball geometry, for example after creating a coarse mesh using GridGenerator::hyper_ball(). (However, it is worth mentioning that generating a good mesh for a disk or ball is complicated and requires additional steps. See the "Possibilities for extensions" section of step-6 for an extensive discussion of how one would construct such meshes and what one needs to do for it.)

    The two template arguments match the meaning of the two template arguments in Triangulation<dim, spacedim>, however this Manifold can be used to describe both thin and thick objects, and the behavior is identical when dim <= spacedim, i.e., the functionality of SphericalManifold<2,3> is identical to SphericalManifold<3,3>.

    While PolarManifold reflects the usual notion of polar coordinates, it may not be suitable for domains that contain either the north or south poles. Consider for instance the pair of points $x_1=(1,\pi/3,0)$ and $x_2=(1,\pi/3,\pi)$ in polar coordinates (lying on the surface of a sphere with radius one, on a parallel at height $\pi/3$). In this case connecting the points with a straight line in polar coordinates would take the long road around the globe, without passing through the north pole.

    These two points would be connected (using a PolarManifold) by the curve

\begin{align*}
  s: [0,1]  & \rightarrow &  \mathbb S^3 \\
          t & \mapsto     &  (1,\pi/3,0) + (0,0,t\pi)
\end{align*}

    This curve is not a geodesic on the sphere, and it is not how we would connect those two points. A better curve would be the one passing through the North pole:

\[
  s(t) = x_1 \cos(\alpha(t)) + \kappa \times x_1 \sin(\alpha(t)) +
  \kappa ( \kappa \cdot x_1) (1-\cos(\alpha(t))).
\]

    where $\kappa = \frac{x_1 \times x_2}{\Vert x_1 \times x_2 \Vert}$ and $\alpha(t) = t \cdot \arccos(x_1 \cdot x_2)$ for $t\in[0,1]$. Indeed, this is a geodesic, and it is the natural choice when connecting points on the surface of the sphere. In the examples above, the PolarManifold class implements the first way of connecting two points on the surface of a sphere, while SphericalManifold implements the second way, i.e., this Manifold connects points using geodesics. If more than two points are involved through a SphericalManifold::get_new_points() call, a so-called spherical average is used where the final point minimizes the weighted distance to all other points via geodesics.

    In particular, this class implements a Manifold that joins any two points in space by first projecting them onto the surface of a sphere with unit radius, then connecting them with a geodesic, and finally rescaling the final radius so that the resulting one is the weighted average of the starting radii. This Manifold is identical to PolarManifold in dimension two, while for dimension three it returns points that are more uniformly distributed on the sphere, and it is invariant with respect to rotations of the coordinate system, therefore avoiding the problems that PolarManifold has at the poles. Notice, in particular, that computing tangent vectors at the poles with a PolarManifold is not well defined, while it is perfectly fine with this class.

    For mathematical reasons, it is impossible to construct a unique map of a sphere using only geodesic curves, and therefore, using this class with MappingManifold is discouraged. If you use this Manifold to describe the geometry of a sphere, you should use MappingQ as the underlying mapping, and not MappingManifold.

    This Manifold can be used only on geometries where a ball with finite radius is removed from the center. Indeed, the center is a singular point for this manifold, and if you try to connect two points across the center, they would travel on spherical coordinates, avoiding the center.
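
    A minimal usage sketch (the radii and refinement level are arbitrary illustrative values; recent GridGenerator functions may already attach a suitable manifold, so the step is spelled out explicitly here):

      #include <deal.II/grid/grid_generator.h>
      #include <deal.II/grid/manifold_lib.h>
      #include <deal.II/grid/tria.h>

      using namespace dealii;

      void make_shell_mesh()
      {
        Triangulation<3> tria;
        GridGenerator::hyper_shell(tria, Point<3>(), 0.5, 1.0);

        // New points created during refinement now follow geodesics
        // around the center instead of straight lines in space.
        tria.set_all_manifold_ids(0);
        tria.set_manifold(0, SphericalManifold<3>(Point<3>()));
        tria.refine_global(2);
      }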

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 2024-01-30 03:04:45.828826228 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 2024-01-30 03:04:45.836826295 +0000 @@ -301,7 +301,7 @@

    Detailed Description

    template<int rank_, int dim, typename Number>
      class SymmetricTensor< rank_, dim, Number >

      Provide a class that stores symmetric tensors of rank 2,4,... efficiently, i.e. only store those off-diagonal elements of the full tensor that are not redundant. For example, for symmetric $2\times 2$ tensors, this would be the elements 11, 22, and 12, while the element 21 is equal to the 12 element. Within this documentation, second order symmetric tensors are denoted as bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters such as $\boldsymbol{\varepsilon}$, $\boldsymbol{\sigma}$. The Cartesian coordinates of a second-order tensor such as $\mathbf A$ are represented as $A_{ij}$ where $i,j$ are indices ranging from 0 to dim-1.

    Using this class for symmetric tensors of rank 2 has advantages over matrices in many cases since the dimension is known to the compiler as well as the location of the data. It is therefore possible to produce far more efficient code than for matrices with runtime-dependent dimension. It is also more efficient than using the more general Tensor class, since fewer elements are stored, and the class automatically makes sure that the tensor represents a symmetric object.

    For tensors of higher rank, the savings in storage are even higher. For example for the $3 \times 3 \times 3 \times 3$ tensors of rank 4, only 36 instead of the full 81 entries have to be stored. These rank 4 tensors are denoted by blackboard-style upper-case Latin letters such as $\mathbb A$ with components $\mathcal{A}_{ijkl}$.

    While the definition of a symmetric rank-2 tensor is obvious, tensors of rank 4 are considered symmetric if they are operators mapping symmetric rank-2 tensors onto symmetric rank-2 tensors. This so-called minor symmetry of the rank 4 tensor requires that for every set of four indices $i, j, k, l$, the identity $\mathcal{C}_{ijkl} = \mathcal{C}_{jikl} =
@@ -630,7 +630,7 @@

    This operator assigns a scalar to a tensor. To avoid confusion with what exactly it means to assign a scalar value to a tensor, zero is the only value allowed for d, allowing the intuitive notation $\mathbf A = 0$ to reset all elements of the tensor to zero.

    @@ -892,8 +892,8 @@

    Double contraction product between the present symmetric tensor and a tensor of rank 2. For example, if the present object is the symmetric rank-2 tensor $\mathbf{A}$ and it is multiplied by another symmetric rank-2 tensor $\mathbf{B}$, then the result is the scalar-product double contraction $\mathbf A : \mathbf B = \sum_{i,j} A_{ij} B_{ij}$. In this case, the return value evaluates to a single scalar. While it is possible to define other scalar products (and associated induced norms), this one seems to be the most appropriate one.

    If the present object is a rank-4 tensor such as $\mathbb A$, then the result is a rank-2 tensor $\mathbf C = \mathbb A : \mathbf B$, i.e., the operation contracts over the last two indices of the present object and the indices of the argument, and the result is a tensor of rank 2 ( $C_{ij} = \sum_{k,l} \mathcal{A}_{ijkl} B_{kl}$).

    Note that the multiplication operator for symmetric tensors is defined to be a double contraction over two indices, while it is defined as a single contraction over only one index for regular Tensor objects. For symmetric tensors it therefore acts in a way that is commonly denoted by a "colon multiplication" in the mathematical literature (the two dots of the colon suggesting that it is a contraction over two indices), which corresponds to a scalar product between tensors.

    It is worth pointing out that this definition of operator* between symmetric tensors is different to how the (in general non-symmetric) Tensor class defines operator*, namely as the single-contraction product over the last index of the first operand and the first index of the second operand. For the double contraction of Tensor objects, you will need to use the double_contract() function.

    To maintain at least a modicum of resemblance between the interfaces of Tensor and SymmetricTensor, there are also global functions double_contract() for symmetric tensors that then do the same work as this operator. However, rather than returning the result as a return value, they write it into the first argument to the function in the same way as the corresponding functions for the Tensor class do things.
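
    A short sketch of both cases (illustrative values only):

      #include <deal.II/base/symmetric_tensor.h>

      using namespace dealii;

      void double_contraction_examples()
      {
        SymmetricTensor<2, 3> A, B;
        A[0][0] = 1.;
        B[0][0] = 2.;
        B[0][1] = 3.; // also sets B[1][0]; the tensor stays symmetric

        // rank-2 : rank-2 gives a scalar, A : B = sum_ij A_ij B_ij
        const double s = A * B;

        // rank-4 : rank-2 gives a rank-2 tensor; with the identity
        // tensor the double contraction returns B itself.
        const SymmetricTensor<4, 3> C = identity_tensor<3>();
        const SymmetricTensor<2, 3> D = C * B;

        (void)s;
        (void)D;
      }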

    @@ -1237,7 +1237,7 @@

    The opposite of the previous function: given an index $i$ in the unrolled form of the tensor, return what set of indices $(k,l)$ (for rank-2 tensors) or $(k,l,m,n)$ (for rank-4 tensors) corresponds to it.

    @@ -1893,7 +1893,7 @@ \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

    For the kind of arguments to this function, i.e., a symmetric rank-2 tensor of size 2, the result is (counting indices starting at one) $I_2(\mathbf A) = II(\mathbf A) = \frac 12 \left[ (A_{11} + A_{22})^2 - (A_{11}^2+2 A_{12}^2+ A_{22}^2) \right] = A_{11} A_{22} - A_{12}^2$. As expected, for the $2\times 2$ symmetric tensors this function handles, this equals the determinant of the tensor. (This is so because for $2\times 2$ symmetric tensors, there really are only two invariants, so the second and third invariant are the same; the determinant is the third invariant.)

    Definition at line 2917 of file symmetric_tensor.h.
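
A quick numerical check of this identity (an added sketch, not from the page itself):

  #include <deal.II/base/symmetric_tensor.h>
  #include <iostream>

  int main()
  {
    using namespace dealii;

    SymmetricTensor<2, 2> A;
    A[0][0] = 4.0;
    A[1][1] = 3.0;
    A[0][1] = 1.0; // A_12 = A_21 by symmetry

    // For 2x2 symmetric tensors the second invariant coincides with the
    // determinant: I_2(A) = A_11 A_22 - A_12^2 = 4*3 - 1 = 11.
    std::cout << "I_2(A) = " << second_invariant(A) << '\n';
    std::cout << "det(A) = " << determinant(A) << '\n';
  }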

    @@ -1982,8 +1982,8 @@
Return the eigenvalues of a symmetric $2\times 2$ tensor. The array of eigenvalues is sorted in descending order.

For $2\times 2$ tensors, the eigenvalues of tensor $\mathbf T$ are the roots of the characteristic polynomial $0 = \lambda^2 - \lambda\;\text{tr}\mathbf{T} + \det \mathbf{T}$ as given by $\lambda_1, \lambda_2 = \frac{1}{2} \left[ \text{tr} \mathbf{T} \pm \sqrt{(\text{tr} \mathbf{T})^2 - 4 \det \mathbf{T}} \right]$.

    Warning
    The algorithm employed here determines the eigenvalues by computing the roots of the characteristic polynomial. In the case that there exists a common root (the eigenvalues are equal), the computation is subject to round-off errors of order $\sqrt{\epsilon}$. As an alternative, the eigenvectors() function provides a more robust, but costly, method to compute the eigenvalues of a symmetric tensor.
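
For illustration (added here), a tensor with trace 4 and determinant 3 has the eigenvalues 3 and 1, returned in descending order:

  #include <deal.II/base/symmetric_tensor.h>
  #include <array>
  #include <iostream>

  int main()
  {
    using namespace dealii;

    SymmetricTensor<2, 2> T;
    T[0][0] = 2.0;
    T[1][1] = 2.0;
    T[0][1] = 1.0;

    // Roots of lambda^2 - lambda tr(T) + det(T) = 0:
    // tr(T) = 4, det(T) = 3, hence lambda = 3 and lambda = 1.
    const std::array<double, 2> lambda = eigenvalues(T);
    std::cout << "lambda_1 = " << lambda[0]   // 3 (largest first)
              << ", lambda_2 = " << lambda[1] // 1
              << std::endl;
  }
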
    @@ -2494,7 +2494,7 @@
Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. In the current case where both arguments are symmetric tensors, this is equivalent to calling the expression A*B which uses SymmetricTensor::operator*().

    Definition at line 3737 of file symmetric_tensor.h.

    @@ -2524,7 +2524,7 @@
Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if B is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

    Definition at line 3759 of file symmetric_tensor.h.

    @@ -2554,7 +2554,7 @@
Compute the scalar product $\mathbf A:\mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if A is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

    Definition at line 3786 of file symmetric_tensor.h.
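
The following added sketch contrasts the two operations for a mixed symmetric/non-symmetric pair: scalar_product() double-contracts to a scalar, while operator* single-contracts to a rank-2 tensor.

  #include <deal.II/base/symmetric_tensor.h>
  #include <deal.II/base/tensor.h>
  #include <iostream>

  int main()
  {
    using namespace dealii;

    SymmetricTensor<2, 3> A;
    A[0][0] = 1.0;
    A[1][1] = 2.0;
    A[0][2] = 0.5;

    Tensor<2, 3> B;
    B[0][0] = 3.0;
    B[1][0] = 1.0; // B need not be symmetric
    B[2][2] = 2.0;

    // Double contraction, sum_{ij} A_ij B_ij: a scalar.
    const double s = scalar_product(A, B);

    // Single contraction, (A.B)_ij = sum_k A_ik B_kj: a rank-2 tensor.
    const Tensor<2, 3> AB = A * B;

    std::cout << "A : B = " << s << ", (A.B)_00 = " << AB[0][0] << std::endl;
  }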

    @@ -2860,13 +2860,13 @@
The dot product (single contraction) for tensors: Return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

\[
  \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
  = \sum_{k}
    \text{left}_{i_1,\ldots,i_{r1}, k}
    \text{right}_{k, j_1,\ldots,j_{r2}}
\]

    Note
    As one operand is a Tensor, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, which does the double contraction.
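
As an added sketch of the rank arithmetic described above: contracting a rank-2 tensor with a rank-1 tensor gives rank 2+1-2=1 (the matrix-vector product), and two rank-2 tensors give rank 2 (the matrix-matrix product).

  #include <deal.II/base/tensor.h>
  #include <iostream>

  int main()
  {
    using namespace dealii;

    Tensor<2, 3> t; // rank 2
    Tensor<1, 3> v; // rank 1
    t[0][0] = 1.0;  t[0][1] = 2.0;  t[1][2] = 3.0;
    v[0] = 1.0;  v[1] = 1.0;  v[2] = 1.0;

    // rank 2 + 1 - 2 = 1: the usual matrix-vector product, contracting
    // the last index of t with the (only) index of v.
    const Tensor<1, 3> tv = t * v;

    // rank 2 + 2 - 2 = 2: the matrix-matrix product.
    const Tensor<2, 3> tt = t * t;

    std::cout << "(t*v)_0 = " << tv[0] << ", (t*t)_01 = " << tt[0][1]
              << std::endl;
  }
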
    @@ -2898,13 +2898,13 @@
The dot product (single contraction) for tensors: Return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

\[
  \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
  = \sum_{k}
    \text{left}_{i_1,\ldots,i_{r1}, k}
    \text{right}_{k, j_1,\ldots,j_{r2}}
\]

    Note
    As one operand is a Tensor, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, which does the double contraction.
    @@ -3051,7 +3051,7 @@ Initial value:
    =
    n_independent_components
An integer denoting the number of independent components that fully describe a symmetric tensor. In $d$ space dimensions, this number equals $\frac 12 (d^2+d)$ for symmetric tensors of rank 2.

    Definition at line 743 of file symmetric_tensor.h.
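
For example (added sketch), $\frac 12 (d^2+d)$ gives 1, 3, and 6 independent components in one, two, and three space dimensions:

  #include <deal.II/base/symmetric_tensor.h>
  #include <iostream>

  int main()
  {
    using namespace dealii;

    // d(d+1)/2 independent entries for a symmetric rank-2 tensor:
    std::cout << SymmetricTensor<2, 1>::n_independent_components << ' '   // 1
              << SymmetricTensor<2, 2>::n_independent_components << ' '   // 3
              << SymmetricTensor<2, 3>::n_independent_components << '\n'; // 6
  }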

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 2024-01-30 03:04:45.876826628 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 2024-01-30 03:04:45.876826628 +0000 @@ -232,7 +232,7 @@

    In some way, this class is similar to the Tensor class, in that it templatizes on the number of dimensions. However, there are two major differences. The first is that the Tensor class stores only numeric values (as doubles), while the Table class stores arbitrary objects. The second is that the Tensor class has fixed sizes in each dimension, also given as a template argument, while this class can handle arbitrary and different sizes in each dimension.

This has two consequences. First, since the size is not known at compile time, it has to do explicit memory allocation. Second, the layout of individual elements is not known at compile time, so access is slower than for the Tensor class, where the number of elements and their location are known at compile time and the compiler can optimize with this knowledge (for example when unrolling loops). On the other hand, this class is of course more flexible, for example when you want a two-dimensional table with the number of rows equal to the number of degrees of freedom on a cell, and the number of columns equal to the number of quadrature points. Both numbers may only be known at run time, so a flexible table is needed here. Furthermore, you may want to store, say, the gradients of shape functions, so the data type is not a single scalar value, but a tensor itself.
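
A minimal added sketch of this flexibility, with sizes chosen only at run time and a non-scalar element type:

  #include <deal.II/base/table.h>
  #include <deal.II/base/tensor.h>

  int main()
  {
    using namespace dealii;

    // Sizes known only at run time, e.g. dofs_per_cell x n_q_points:
    const unsigned int dofs_per_cell = 8, n_q_points = 4;

    // A table of scalars ...
    Table<2, double> values(dofs_per_cell, n_q_points);
    values(0, 0) = 1.0;

    // ... and, unlike Tensor, a table can also store arbitrary objects,
    // for example the gradients of shape functions:
    Table<2, Tensor<1, 3>> gradients(dofs_per_cell, n_q_points);
    gradients(0, 0)[2] = 0.5;
  }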

    Dealing with large data sets

The Table classes (derived from this class) are frequently used to store large data tables. A modest example is given in step-53 where we store a $380 \times 220$ table of geographic elevation data for a region of Africa, and this data requires about 670 kB of memory; however, tables that store three- or more-dimensional data (say, information about the density, pressure, and temperature in the earth's interior on a regular grid of (latitude, longitude, depth) points) can easily run into hundreds of megabytes or more. These tables are then often provided to classes such as InterpolatedTensorProductGridData or InterpolatedUniformGridData.

    If you need to load such tables on single-processor (or multi-threaded) jobs, then there is nothing you can do about the size of these tables: The table just has to fit into memory. But, if your program is parallelized via MPI, then a typical first implementation would create a table object on every process and fill it on every MPI process by reading the data from a file. This is inefficient from two perspectives:

    Here, 'clockwise' is relative to the vector defined by the cross product of two lines in their standard orientation (which, e.g., points into the hexahedron for face 0 but out of the hexahedron for face 1).

For triangles, to enable indexing from the combined orientation, we do not consider flip-rotate or flip-orient-rotate, as those cases are equivalent, respectively, to the identity operation and the orientation = true case (flip-rotate equals the identity operation). This choice ensures that the integer value of the combined orientation is in $[0, 5]$.

    Definition at line 68 of file tria_objects_orientations.h.

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 2024-01-30 03:04:47.208837726 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 2024-01-30 03:04:47.208837726 +0000 @@ -2026,7 +2026,7 @@

    When vertices have been moved locally, for example using code like

    cell->vertex(0) = new_location;

    then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g., the one with the lowest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

    Note
    It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell are consistent with the corresponding locations of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
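
The following added sketch (assuming deal.II is configured with MPI and p4est; because the displacement is uniform, the single-owner bookkeeping discussed above is trivially uncritical here, while a real application would implement the lowest-subdomain-id ownership rule) shows the typical call sequence:

  #include <deal.II/base/mpi.h>
  #include <deal.II/distributed/tria.h>
  #include <deal.II/grid/grid_generator.h>

  using namespace dealii;

  int main(int argc, char **argv)
  {
    Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);

    parallel::distributed::Triangulation<2> triangulation(MPI_COMM_WORLD);
    GridGenerator::hyper_cube(triangulation);
    triangulation.refine_global(3);

    // Move every vertex of the locally owned cells and record which
    // vertices were moved.
    std::vector<bool> vertex_moved(triangulation.n_vertices(), false);
    Tensor<1, 2> displacement;
    displacement[0] = 0.1;

    for (const auto &cell : triangulation.active_cell_iterators())
      if (cell->is_locally_owned())
        for (const unsigned int v : cell->vertex_indices())
          if (!vertex_moved[cell->vertex_index(v)])
            {
              cell->vertex(v) += displacement;
              vertex_moved[cell->vertex_index(v)] = true;
            }

    // Propagate the new locations to ghost cells on other processes:
    triangulation.communicate_locally_moved_vertices(vertex_moved);
  }
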
    @@ -2426,7 +2426,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
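
A small added illustration with a serial Triangulation (the member function behaves the same there):

  #include <deal.II/grid/grid_generator.h>
  #include <deal.II/grid/tria.h>
  #include <iostream>

  int main()
  {
    using namespace dealii;

    Triangulation<2> triangulation;
    GridGenerator::hyper_cube(triangulation); // one coarse cell

    // Three sweeps of uniform refinement: each sweep turns every cell
    // into 2^dim = 4 children, so we end up with (2^2)^3 = 64 cells.
    triangulation.refine_global(3);
    std::cout << triangulation.n_active_cells() << std::endl; // prints 64
  }
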
    @@ -6647,7 +6647,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument may be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 2024-01-30 03:04:47.352838926 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 2024-01-30 03:04:47.352838926 +0000 @@ -1586,7 +1586,7 @@

    When vertices have been moved locally, for example using code like

    cell->vertex(0) = new_location;

    then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g., the one with the lowest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

    Note
    It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell are consistent with the corresponding locations of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
    @@ -2053,7 +2053,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
    @@ -6305,7 +6305,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument may be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 2024-01-30 03:04:47.524840358 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 2024-01-30 03:04:47.524840358 +0000 @@ -1945,7 +1945,7 @@
Return a permutation vector for the order the coarse cells are handed off to p4est. For example, the value of the $i$th element in this vector is the index of the deal.II coarse cell (counting from begin(0)) that corresponds to the $i$th tree managed by p4est.

    Definition at line 3609 of file tria.cc.
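
An added sketch (requires deal.II built with p4est) that queries the permutation via get_p4est_tree_to_coarse_cell_permutation():

  #include <deal.II/base/mpi.h>
  #include <deal.II/distributed/tria.h>
  #include <deal.II/grid/grid_generator.h>
  #include <iostream>

  using namespace dealii;

  int main(int argc, char **argv)
  {
    Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);

    parallel::distributed::Triangulation<2> triangulation(MPI_COMM_WORLD);
    GridGenerator::subdivided_hyper_cube(triangulation, 4); // 16 coarse cells

    // Element i of the permutation is the index of the deal.II coarse
    // cell (counting from begin(0)) that corresponds to the i-th p4est tree:
    const std::vector<types::global_dof_index> &permutation =
      triangulation.get_p4est_tree_to_coarse_cell_permutation();

    for (unsigned int i = 0; i < permutation.size(); ++i)
      std::cout << "tree " << i << " -> coarse cell " << permutation[i] << '\n';
  }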

    @@ -3039,7 +3039,7 @@

    When vertices have been moved locally, for example using code like

    cell->vertex(0) = new_location;

    then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g., the one with the lowest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

    Note
    It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell are consistent with the corresponding locations of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
    @@ -3355,7 +3355,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
    @@ -7358,7 +7358,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument may be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 2024-01-30 03:04:47.684841691 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 2024-01-30 03:04:47.684841691 +0000 @@ -2361,7 +2361,7 @@

    When vertices have been moved locally, for example using code like

    cell->vertex(0) = new_location;

    then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g., the one with the lowest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

    Note
    It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell are consistent with the corresponding locations of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
    @@ -2760,7 +2760,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
    @@ -6918,7 +6918,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument may be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 2024-01-30 03:04:47.844843025 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 2024-01-30 03:04:47.848843058 +0000 @@ -2529,7 +2529,7 @@

    When vertices have been moved locally, for example using code like

    cell->vertex(0) = new_location;

    then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g., the one with the lowest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

    Note
    It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell are consistent with the corresponding locations of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
    @@ -2857,7 +2857,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
    @@ -6860,7 +6860,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument may be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 2024-01-30 03:04:48.000844324 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 2024-01-30 03:04:48.000844324 +0000 @@ -2029,7 +2029,7 @@

    When vertices have been moved locally, for example using code like

    cell->vertex(0) = new_location;

    then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g., the one with the lowest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

    Note
    It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell are consistent with the corresponding locations of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
    @@ -2376,7 +2376,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
    @@ -6557,7 +6557,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument may be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 2024-01-30 03:04:48.036844624 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 2024-01-30 03:04:48.036844624 +0000 @@ -105,10 +105,10 @@
    Use numbers::invalid_fe_index instead.
    Member DoFHandler< dim, spacedim >::set_active_fe_indices (const std::vector< unsigned int > &active_fe_indices)
    Use set_active_fe_indices() with the types::fe_index datatype.
Member DoFTools::extract_boundary_dofs (const DoFHandler< dim, spacedim > &dof_handler, const ComponentMask &component_mask, std::vector< bool > &selected_dofs, const std::set< types::boundary_id > &boundary_ids={})
This function will not work for DoFHandler objects that are built on a parallel::distributed::Triangulation object. The reason is that the output argument selected_dofs has to have a length equal to all global degrees of freedom. Consequently, this does not scale to very large problems, and this is also why the function is deprecated. If you need the functionality of this function for parallel triangulations, then you need to use the other DoFTools::extract_boundary_dofs() function that returns its information via an IndexSet object.
Member DoFTools::extract_boundary_dofs (const DoFHandler< dim, spacedim > &dof_handler, const ComponentMask &component_mask, IndexSet &selected_dofs, const std::set< types::boundary_id > &boundary_ids={})
Use the previous function instead.
    Member DoFTools::extract_locally_active_dofs (const DoFHandler< dim, spacedim > &dof_handler, IndexSet &dof_set)
    Use the previous function instead.
    Member DoFTools::extract_locally_active_level_dofs (const DoFHandler< dim, spacedim > &dof_handler, IndexSet &dof_set, const unsigned int level)
    @@ -119,20 +119,20 @@
    Use the previous function instead.
    Member DoFTools::get_active_fe_indices (const DoFHandler< dim, spacedim > &dof_handler, std::vector< unsigned int > &active_fe_indices)
    Use DoFHandler::get_active_fe_indices() that returns the result vector.
Member DoFTools::map_dofs_to_support_points (const hp::MappingCollection< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof_handler, std::map< types::global_dof_index, Point< spacedim > > &support_points, const ComponentMask &mask=ComponentMask())
Use the function that returns the std::map instead.
Member DoFTools::map_dofs_to_support_points (const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof_handler, std::map< types::global_dof_index, Point< spacedim > > &support_points, const ComponentMask &mask=ComponentMask())
Use the function that returns the std::map instead.
Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::evaluate (const bool evaluate_values, const bool evaluate_gradients, const bool evaluate_hessians=false)
Use evaluate() with the EvaluationFlags argument (see the migration sketch at the end of this list).
Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::evaluate (const VectorizedArrayType *values_array, const bool evaluate_values, const bool evaluate_gradients, const bool evaluate_hessians=false)
Use evaluate() with the EvaluationFlags argument.
    Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::gather_evaluate (const VectorType &input_vector, const bool evaluate_values, const bool evaluate_gradients, const bool evaluate_hessians=false)
    Please use the gather_evaluate() function with the EvaluationFlags argument.
Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::integrate (const bool integrate_values, const bool integrate_gradients)
Please use the integrate() function with the EvaluationFlags argument.
Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::integrate (const bool integrate_values, const bool integrate_gradients, VectorizedArrayType *values_array)
Please use the integrate() function with the EvaluationFlags argument.
    Member FEEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::integrate_scatter (const bool integrate_values, const bool integrate_gradients, VectorType &output_vector)
    Please use the integrate_scatter() function with the EvaluationFlags argument.
    Member FEFaceEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::evaluate (const bool evaluate_values, const bool evaluate_gradients)
    @@ -192,13 +192,13 @@
    Member FEInterfaceViews::Vector< dim, spacedim >::jump_hessian (const unsigned int interface_dof_index, const unsigned int q_point) const
    Use the average_of_hessians() function instead.
    Struct FEValuesViews::Scalar< dim, spacedim >::OutputType< Number >
Use the types defined in the surrounding class instead.
Struct FEValuesViews::SymmetricTensor< 2, dim, spacedim >::OutputType< Number >
Use the types defined in the surrounding class instead.
Struct FEValuesViews::Tensor< 2, dim, spacedim >::OutputType< Number >
Use the types defined in the surrounding class instead.
Struct FEValuesViews::Vector< dim, spacedim >::OutputType< Number >
Use the types defined in the surrounding class instead.
    Member FiniteElement< dim, spacedim >::fill_fe_face_values (const typename Triangulation< dim, spacedim >::cell_iterator &cell, const unsigned int face_no, const Quadrature< dim - 1 > &quadrature, const Mapping< dim, spacedim > &mapping, const typename Mapping< dim, spacedim >::InternalDataBase &mapping_internal, const internal::FEValuesImplementation::MappingRelatedData< dim, spacedim > &mapping_data, const InternalDataBase &fe_internal, internal::FEValuesImplementation::FiniteElementRelatedData< dim, spacedim > &output_data) const
    Use the version taking a hp::QCollection argument.
    Member FiniteElement< dim, spacedim >::get_face_data (const UpdateFlags update_flags, const Mapping< dim, spacedim > &mapping, const Quadrature< dim - 1 > &quadrature, internal::FEValuesImplementation::FiniteElementRelatedData< dim, spacedim > &output_data) const
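The OutputType entries above all point at aliases defined directly in the surrounding view classes. A sketch of the rename (the solution_*_type alias names are an assumption from the newer FEValuesViews interface and should be verified against the class documentation):

    // deprecated nested struct:
    //   FEValuesViews::Scalar<2>::OutputType<double>::value_type
    // aliases in the surrounding class (assumed names):
    using value_t =
      typename FEValuesViews::Scalar<2>::template solution_value_type<double>;
    using gradient_t =
      typename FEValuesViews::Scalar<2>::template solution_gradient_type<double>;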
    @@ -223,20 +223,20 @@
    Use import_elements() instead.
    Member LinearAlgebra::distributed::BlockVector< Number >::zero_out_ghosts () const
    Use zero_out_ghost_values() instead.
    -
    Member LinearAlgebra::distributed::Vector< Number, MemorySpace >::import (const Vector< Number, MemorySpace2 > &src, VectorOperation::values operation)
    -
    Use import_elements() instead.
    Member LinearAlgebra::distributed::Vector< Number, MemorySpace >::import (const LinearAlgebra::ReadWriteVector< Number > &V, VectorOperation::values operation, std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > communication_pattern={}) override
    Use import_elements() instead.
    +
    Member LinearAlgebra::distributed::Vector< Number, MemorySpace >::import (const Vector< Number, MemorySpace2 > &src, VectorOperation::values operation)
    +
    Use import_elements() instead.
    Member LinearAlgebra::distributed::Vector< Number, MemorySpace >::local_size () const
    Use locally_owned_size() instead.
    Member LinearAlgebra::distributed::Vector< Number, MemorySpace >::zero_out_ghosts () const
    Use zero_out_ghost_values() instead.
    Member LinearAlgebra::EpetraWrappers::Vector::import (const ReadWriteVector< double > &V, VectorOperation::values operation, std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > communication_pattern={}) override
    Use import_elements() instead.
    -
    Member LinearAlgebra::ReadWriteVector< Number >::import (const ::Vector< Number > &V, VectorOperation::values operation, const std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > &communication_pattern={})
    -
    Use import_elements() instead.
    Member LinearAlgebra::ReadWriteVector< Number >::import (const LinearAlgebra::Vector< Number > &V, VectorOperation::values operation, const std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > &communication_pattern={})
    Use import_elements() instead.
    +
    Member LinearAlgebra::ReadWriteVector< Number >::import (const ::Vector< Number > &V, VectorOperation::values operation, const std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > &communication_pattern={})
    +
    Use import_elements() instead.
    Member LinearAlgebra::ReadWriteVector< Number >::import (const distributed::Vector< Number, MemorySpace > &V, VectorOperation::values operation, const std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > &communication_pattern={})
    Use import_elements() instead.
    Member LinearAlgebra::ReadWriteVector< Number >::import (const PETScWrappers::MPI::Vector &V, VectorOperation::values operation, const std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > &communication_pattern={})
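Every import() entry in this hunk is a pure rename to import_elements() with unchanged arguments; a minimal sketch:

    LinearAlgebra::ReadWriteVector<double>     rw_vector;
    LinearAlgebra::distributed::Vector<double> vec;
    // ... fill rw_vector ...
    // was: vec.import(rw_vector, VectorOperation::insert);
    vec.import_elements(rw_vector, VectorOperation::insert);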
    @@ -257,18 +257,18 @@
    Use import_elements() instead.
    Member LinearAlgebra::VectorSpaceVector< Number >::import (const ReadWriteVector< Number > &V, VectorOperation::values operation, std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > communication_pattern={})=0
    Use import_elements() instead.
    +
    Member make_array_view (Tensor< rank, dim, Number > &tensor)
    +
This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this is so. As a consequence, this function is deprecated.
Member make_array_view (SymmetricTensor< rank, dim, Number > &tensor)
This function suggests that the elements of a SymmetricTensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this is so. As a consequence, this function is deprecated.
Member make_array_view (const SymmetricTensor< rank, dim, Number > &tensor)
This function suggests that the elements of a SymmetricTensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this is so. As a consequence, this function is deprecated.
Member make_array_view (const Tensor< rank, dim, Number > &tensor)
This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this is so. As a consequence, this function is deprecated.
-
Member make_array_view (Tensor< rank, dim, Number > &tensor)
-
This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this is so. As a consequence, this function is deprecated.
    Member Mapping< dim, spacedim >::fill_fe_face_values (const typename Triangulation< dim, spacedim >::cell_iterator &cell, const unsigned int face_no, const Quadrature< dim - 1 > &quadrature, const typename Mapping< dim, spacedim >::InternalDataBase &internal_data, internal::FEValuesImplementation::MappingRelatedData< dim, spacedim > &output_data) const
    -
    Use the version taking a hp::QCollection argument.
    +
    Use the version taking a hp::QCollection argument.
    Member Mapping< dim, spacedim >::get_face_data (const UpdateFlags update_flags, const Quadrature< dim - 1 > &quadrature) const
    -
    Use the version taking a hp::QCollection argument.
    +
    Use the version taking a hp::QCollection argument.
    Member MappingQCache< dim, spacedim >::initialize (const Triangulation< dim, spacedim > &triangulation, const MappingQ< dim, spacedim > &mapping)
    Use initialize() version above instead.
    Member parallel::distributed::Triangulation< dim, spacedim >::load (const std::string &filename, const bool autopartition) override
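The make_array_view() overloads in this hunk are deprecated because tensor elements are not guaranteed to be contiguous in memory. A sketch of a replacement that first unrolls the tensor into a real array, using the iterator-pair unroll() that the Tensor entry in the next hunk recommends (variable names are made up):

    Tensor<2, 3> t;
    std::array<double, Tensor<2, 3>::n_independent_components> unrolled;
    t.unroll(unrolled.begin(), unrolled.end());
    const ArrayView<double> view(unrolled.data(), unrolled.size());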
    @@ -351,22 +351,22 @@
This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this is so. As a consequence, this function is deprecated.
Member Tensor< 0, dim, Number >::begin_raw ()
This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this is so. As a consequence, this function is deprecated.
-
Member Tensor< 0, dim, Number >::end_raw () const
-
This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this is so. As a consequence, this function is deprecated.
Member Tensor< 0, dim, Number >::end_raw ()
This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this is so. As a consequence, this function is deprecated.
+
Member Tensor< 0, dim, Number >::end_raw () const
+
This function suggests that the elements of a Tensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this is so. As a consequence, this function is deprecated.
    Member Tensor< rank_, dim, Number >::unroll (Vector< OtherNumber > &result) const
    Use the more general function that takes a pair of iterators instead.
    +
    Member Threads::new_thread (RT(C::*fun_ptr)(Args...) const, std_cxx20::type_identity_t< const C > &c, std_cxx20::type_identity_t< Args >... args)
    +
    Use std::thread or std::jthread instead.
    +
    Member Threads::new_thread (RT(C::*fun_ptr)(Args...), std_cxx20::type_identity_t< C > &c, std_cxx20::type_identity_t< Args >... args)
    +
    Use std::thread or std::jthread instead.
    Member Threads::new_thread (const std::function< RT()> &function)
    Use std::thread or std::jthread instead.
    Member Threads::new_thread (FunctionObjectType function_object) -> Thread< decltype(function_object())>
    Use std::thread or std::jthread instead.
    Member Threads::new_thread (RT(*fun_ptr)(Args...), std_cxx20::type_identity_t< Args >... args)
    Use std::thread or std::jthread instead.
    -
    Member Threads::new_thread (RT(C::*fun_ptr)(Args...) const, std_cxx20::type_identity_t< const C > &c, std_cxx20::type_identity_t< Args >... args)
    -
    Use std::thread or std::jthread instead.
    -
    Member Threads::new_thread (RT(C::*fun_ptr)(Args...), std_cxx20::type_identity_t< C > &c, std_cxx20::type_identity_t< Args >... args)
    -
    Use std::thread or std::jthread instead.
    Class Threads::Thread< RT >
    Use std::thread or std::jthread instead.
    Class Threads::ThreadGroup< RT >
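The Threads::new_thread() family in this hunk is deprecated wholesale in favor of plain C++ threads. A self-contained sketch of the migration (MyClass and compute are hypothetical names):

    #include <thread>

    struct MyClass
    {
      double compute(int i) const { return 2.0 * i; }
    };

    int main()
    {
      MyClass obj;
      double  result = 0.;
      // was: Threads::Thread<double> t = Threads::new_thread(&MyClass::compute, obj, 42);
      //      const double result = t.return_value();
      std::thread t([&] { result = obj.compute(42); }); // or std::jthread (C++20)
      t.join();
    }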
    @@ -401,12 +401,12 @@
    Use the more clearly named function locally_owned_size() instead.
    Member XDMFEntry::get_xdmf_content (const unsigned int indent_level, const ReferenceCell &reference_cell) const
    Use the other function instead.
    -
    Member XDMFEntry::XDMFEntry (const std::string &mesh_filename, const std::string &solution_filename, const double time, const std::uint64_t nodes, const std::uint64_t cells, const unsigned int dim, const unsigned int spacedim)
    -
    Use the constructor that additionally takes a ReferenceCell.
    +
    Member XDMFEntry::XDMFEntry (const std::string &filename, const double time, const std::uint64_t nodes, const std::uint64_t cells, const unsigned int dim)
    +
    Use the constructor that additionally takes a ReferenceCell.
    Member XDMFEntry::XDMFEntry (const std::string &mesh_filename, const std::string &solution_filename, const double time, const std::uint64_t nodes, const std::uint64_t cells, const unsigned int dim)
    Use the constructor that additionally takes a ReferenceCell.
    -
    Member XDMFEntry::XDMFEntry (const std::string &filename, const double time, const std::uint64_t nodes, const std::uint64_t cells, const unsigned int dim)
    -
    Use the constructor that additionally takes a ReferenceCell.
    +
    Member XDMFEntry::XDMFEntry (const std::string &mesh_filename, const std::string &solution_filename, const double time, const std::uint64_t nodes, const std::uint64_t cells, const unsigned int dim, const unsigned int spacedim)
    +
    Use the constructor that additionally takes a ReferenceCell.
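All three XDMFEntry constructor deprecations above ask for the variant that additionally receives a ReferenceCell. A sketch (only the extra ReferenceCell argument is stated by the deprecation notes; the exact parameter order of the replacement is an assumption):

    // was: XDMFEntry entry("mesh.h5", "solution.h5", time, n_nodes, n_cells, dim);
    XDMFEntry entry("mesh.h5", "solution.h5", time, n_nodes, n_cells, dim,
                    ReferenceCells::Quadrilateral);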
    /usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 2024-01-30 03:04:48.056844791 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 2024-01-30 03:04:48.056844791 +0000 @@ -145,7 +145,7 @@
    One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$:

    \[
   \nabla \mathbf F(\mathbf x) \; \Delta \mathbf x
   \approx
   \mathbf F(\mathbf x+\Delta \mathbf x) - \mathbf F(\mathbf x).
   \]
@@ -279,7 +279,7 @@

    Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

    \[
   \mathbf u \cdot \mathbf A \mathbf v =
   \text{DF2}^{-1}(\mathbf u) \cdot \text{DF1}^{-1}(\mathbf v)
   \]
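Both hunks in this file concern apply_transformation(), which is easiest to read in code. A minimal usage sketch of the first variant discussed above (the variable names are invented):

    DerivativeForm<1, 2, 3> grad_F;  // nabla F for a mapping from R^2 to R^3
    Tensor<1, 2>            delta_x; // the increment Delta x
    const Tensor<1, 3> delta_F = apply_transformation(grad_F, delta_x);
    // delta_F approximates F(x + delta_x) - F(x)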
/usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html differs (HTML document, ASCII text)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html	2024-01-30 03:04:48.096845124 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html	2024-01-30 03:04:48.096845124 +0000
@@ -138,8 +138,8 @@
/usr/share/doc/packages/dealii/doxygen/deal.II/grid__tools_8h_source.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/grid__tools_8h_source.html 2024-01-30 03:04:48.228846224 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/grid__tools_8h_source.html 2024-01-30 03:04:48.228846224 +0000 @@ -1021,8 +1021,8 @@

    3211 DEAL_II_CXX20_REQUIRES(concepts::is_triangulation_or_dof_handler<MeshType>)
    3212 void collect_periodic_faces(
    3213 const MeshType & mesh,
    -
    3214 const types::boundary_id b_id1,
    -
    3215 const types::boundary_id b_id2,
    +
    3214 const types::boundary_id b_id1,
    +
    3215 const types::boundary_id b_id2,
    3216 const unsigned int direction,
    3217 std::vector<PeriodicFacePair<typename MeshType::cell_iterator>>
    3218 & matched_pairs,
    @@ -2596,6 +2596,7 @@
    STL namespace.
    ::VectorizedArray< Number, width > abs(const ::VectorizedArray< Number, width > &)
    Definition types.h:33
    +
    unsigned int boundary_id
    Definition types.h:141
    unsigned int subdomain_id
    Definition types.h:44
    unsigned int global_dof_index
    Definition types.h:82
    /usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 2024-01-30 03:04:48.272846590 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 2024-01-30 03:04:48.272846590 +0000 @@ -177,7 +177,7 @@

    The macro DEAL_II_CONSTEXPR expands to constexpr if the compiler supports enough constexpr features (such as loops). If the compiler does not, then this macro expands to nothing.

    Functions declared as constexpr can be evaluated at compile time. Hence code like

    constexpr double det_A = determinant(A);
    DEAL_II_HOST constexpr Number determinant(const SymmetricTensor< 2, dim, Number > &)
    assuming A is declared with the constexpr specifier, will typically result in compile-time constants. This example shows the performance gains of using constexpr because here we performed an operation with $O(\text{dim}^3)$ complexity during compile time, avoiding any runtime cost.

    Function Documentation

    ◆ new_thread()

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 2024-01-30 03:04:48.292846757 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 2024-01-30 03:04:48.292846757 +0000 @@ -176,7 +176,7 @@
    template <typename VectorType>
    virtual void Tstep(VectorType &u, const VectorType &v) const =0;
    };
    where these two member functions perform one step (or the transpose of such a step) of the smoothing scheme. In other words, the operations performed by these functions are $u = u - P^{-1} (A u - v)$ and $u = u - P^{-T} (A u - v)$.
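The step()/Tstep() pair quoted above is easiest to see in a concrete class. A sketch of a damped Richardson relaxation satisfying this interface (class and parameter names are invented; here $P^{-1} = \omega I$):

    template <typename MatrixType, typename VectorType>
    class RichardsonRelaxation
    {
    public:
      RichardsonRelaxation(const MatrixType &A, const double omega)
        : A(&A), omega(omega) {}

      void step(VectorType &u, const VectorType &v) const
      {
        VectorType r(u);
        A->vmult(r, u);   // r = A u
        r -= v;           // r = A u - v
        u.add(-omega, r); // u = u - omega (A u - v)
      }

      void Tstep(VectorType &u, const VectorType &v) const
      {
        VectorType r(u);
        A->Tvmult(r, u);  // transposed operator for the transposed step
        r -= v;
        u.add(-omega, r);
      }

    private:
      const MatrixType *A;
      const double      omega;
    };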

    SparsityPatternType
    /usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 2024-01-30 03:04:48.644849690 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 2024-01-30 03:04:48.644849690 +0000 @@ -324,7 +324,7 @@
    std::function<void(Domain &, const Range &)> Tvmult;
    std::function<void(Domain &, const Range &)> Tvmult_add;

    Thus, such an object can be used in all iterative solver classes, either as a matrix object or as a preconditioner.

    The big advantage of the LinearOperator class is that it provides syntactic sugar for complex matrix-vector operations. As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possibly different) SparseMatrix objects. In order to construct a LinearOperator op that performs the above computation when applied on a vector, one can write:

    #include <deal.II/lac/linear_operator_tools.h>
    double k;
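The hunk breaks off after double k;. In the documentation this example continues by composing the operators; a sketch of that continuation, grounded in the prose above ($(A+k\,B)\,C$) rather than quoted from the page:

    // given: SparseMatrix<double> A, B, C; double k; (all initialized)
    const auto op_a = linear_operator(A);
    const auto op_b = linear_operator(B);
    const auto op_c = linear_operator(C);
    const auto op   = (op_a + k * op_b) * op_c; // applies (A + k B) C in vmult()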
    @@ -376,7 +376,7 @@
    result += b;
    result -= c;
    result += d;
    that avoids any intermediate storage. As a second example (involving a LinearOperator object) consider the computation of a residual $b-Ax$:

    ::SparseMatrix<double> A;
    ::Vector<double> b, x;
    // ..
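Continuing with the A, b, x declared above, a sketch of the lazily evaluated operation that the prose describes:

    const auto op_a     = linear_operator(A);
    const auto residual = b - op_a * x; // a PackagedOperation; no vector is
    Vector<double> r;                   // computed until it is assigned:
    r = residual;                       // memory is allocated and filled here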
@@ -600,8 +600,8 @@ const LinearOperator< Range, Domain, Payload > & second_op

    Addition of two linear operators first_op and second_op given by $(\mathrm{first\_op}+\mathrm{second\_op})x \dealcoloneq \mathrm{first\_op}(x) + \mathrm{second\_op}(x)$
    Definition at line 390 of file linear_operator.h.

@@ -624,8 +624,8 @@ const LinearOperator< Range, Domain, Payload > & second_op

    Subtraction of two linear operators first_op and second_op given by $(\mathrm{first\_op}-\mathrm{second\_op})x \dealcoloneq \mathrm{first\_op}(x) - \mathrm{second\_op}(x)$
    Definition at line 449 of file linear_operator.h.

@@ -1348,7 +1348,7 @@ const Domain & u

    Create a PackagedOperation object from a LinearOperator and a reference to a vector u of the Domain space. The object stores the PackagedOperation $\text{op} \,u$ (in matrix notation). return (return_add) are implemented with vmult(__1,u) (vmult_add(__1,u)).

    The PackagedOperation object that is created stores a reference to u. Thus, the vector must remain a valid reference for the whole lifetime of the PackagedOperation object. All changes made on u after the creation of the PackagedOperation object are reflected by the operator object.

    Definition at line 669 of file packaged_operation.h.

@@ -1372,7 +1372,7 @@ const LinearOperator< Range, Domain, Payload > & op

    Create a PackagedOperation object from a LinearOperator and a reference to a vector u of the Range space. The object stores the PackagedOperation $\text{op}^T \,u$ (in matrix notation). return (return_add) are implemented with Tvmult(__1,u) (Tvmult_add(__1,u)).

    The PackagedOperation object that is created stores a reference to u. Thus, the vector must remain a valid reference for the whole lifetime of the PackagedOperation object. All changes made on u after the creation of the PackagedOperation object are reflected by the operator object.

    Definition at line 704 of file packaged_operation.h.

@@ -1396,7 +1396,7 @@ const PackagedOperation< Domain > & comp

    Composition of a PackagedOperation object with a LinearOperator. The object stores the computation $\text{op} \,comp$ (in matrix notation).

    Definition at line 731 of file packaged_operation.h.

@@ -1419,7 +1419,7 @@ const LinearOperator< Range, Domain, Payload > & op

    Composition of a PackagedOperation object with a LinearOperator. The object stores the computation $\text{op}^T \,comp$ (in matrix notation).

    Definition at line 775 of file packaged_operation.h.

    @@ -1455,7 +1455,7 @@

    Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that are likely necessary to be used in order to perform any useful tasks in linear algebra with this operator.

    We construct the definition of the Schur complement in the following way:

    Consider a general system of linear equations that can be decomposed into two major sets of equations:

\begin{eqnarray*}
 \mathbf{K}\mathbf{d} = \mathbf{f}
 \quad \Rightarrow\quad
 \left(\begin{array}{cc}
    A & B \\ C & D
 \end{array}\right)
 \left(\begin{array}{cc}
    x \\ y
 \end{array}\right)
 =
 \left(\begin{array}{cc}
    f \\ g
 \end{array}\right),
\end{eqnarray*}

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

    This is equivalent to the following two statements:

\begin{eqnarray*}
  (1) \quad Ax + By &=& f \\
  (2) \quad Cx + Dy &=& g \quad .
\end{eqnarray*}

    Assuming that $ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

\begin{eqnarray*}
  (3) \quad x &=& A^{-1}(f - By) \quad \text{from} \quad (1) \\
  (4) \quad y &=& D^{-1}(g - Cx) \quad \text{from} \quad (2) ,
\end{eqnarray*}

    which amount to performing block Gaussian elimination on this system of equations.

    For the purpose of the current implementation, we choose to substitute (3) into (2)

\begin{eqnarray*}
  C \: A^{-1}(f - By) + Dy &=& g \\
  -C \: A^{-1} \: By + Dy &=& g - C \: A^{-1} \: f \quad .
\end{eqnarray*}

    This leads to the result

\[
   (5) \quad (D - C\: A^{-1} \:B)y  = g - C \: A^{-1} f
       \quad \Rightarrow \quad Sy = g'
\]

    with $ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

    So for any arbitrary vector $ a $, the Schur complement performs the following operation:

\[
   (6) \quad Sa = (D - C \: A^{-1} \: B)a
\]

    A typical set of steps needed to solve a linear system (1),(2) would be:

    1. Define the inverse matrix A_inv (using inverse_operator()).
    2. Define the Schur complement $ S $ (using schur_complement()).
    3. Define the iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that both $ S $ and its preconditioner operate over the same space as $ D $.
    4. Perform a pre-processing step on the RHS of (5) using condense_schur_rhs():

   \[
      g' = g - C \: A^{-1} \: f
   \]

    5. Solve for $ y $ in (5):

   \[
      y =  S^{-1} g'
   \]

    6. Perform the post-processing step from (3) using postprocess_schur_solution():

   \[
      x =  A^{-1} (f - By)
   \]
    @@ -1567,10 +1567,10 @@
    LinearOperator< Domain, Range, Payload > inverse_operator(const LinearOperator< Range, Domain, Payload > &op, Solver &solver, const Preconditioner &preconditioner)
    PackagedOperation< Domain_1 > postprocess_schur_solution(const LinearOperator< Range_1, Domain_1, Payload > &A_inv, const LinearOperator< Range_1, Domain_2, Payload > &B, const Domain_2 &y, const Range_1 &f)
    In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

    A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1} $ is derived from the mass matrix over this space.

    From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein the expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

    // Construction of approximate inverse of Schur complement
    const auto A_inv_approx = linear_operator(preconditioner_A);
    const auto S_approx = schur_complement(A_inv_approx,B,C,D);
    @@ -1593,8 +1593,8 @@
    // Solve for y
    y = S_inv * rhs;
    x = postprocess_schur_solution (A_inv,B,y,f);
    Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of an IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $ \text{prec}(D) $, should ideally be computationally inexpensive.

    However, if an iterative solver based on IterationNumberControl is used as a preconditioner then the preconditioning operation is not a linear operation. Here a flexible solver like SolverFGMRES (flexible GMRES) is best employed as an outer solver in order to deal with the variable behavior of the preconditioner. Otherwise the iterative solver can stagnate somewhere near the tolerance of the preconditioner or generally behave erratically. Alternatively, using a ReductionControl would ensure that the preconditioner always solves to the same tolerance, thereby rendering its behavior constant.

    Further examples of this functionality can be found in the test-suite, such as tests/lac/schur_complement_01.cc. The solution of a multi-component problem (namely step-22) using the schur_complement can be found in tests/lac/schur_complement_03.cc.

    See also
    Block (linear algebra)
@@ -1631,15 +1631,15 @@
/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 2024-01-30 03:04:48.664849856 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 2024-01-30 03:04:48.664849856 +0000 @@ -124,7 +124,7 @@

    where $q$ indicates the index of the quadrature point, $\hat{\bf x}_q$ its location on the reference cell, and $w_q$ its weight.

    In order to evaluate such an expression in an application code, we have to access three different kinds of objects: a quadrature object that describes locations $\hat{\bf x}_q$ and weights $w_q$ of quadrature points on the reference cell; a finite element object that describes the gradients $\hat\nabla \varphi_i(\hat{\bf x}_q)$ of shape functions on the unit cell; and a mapping object that provides the Jacobian as well as its determinant. Dealing with all these objects would be cumbersome and error prone.

    On the other hand, these three kinds of objects almost always appear together, and it is in fact very rare for deal.II application codes to do anything with quadrature, finite element, or mapping objects besides using them together. For this reason, deal.II uses the FEValues abstraction combining information on the shape functions, the geometry of the actual mesh cell and a quadrature rule on a reference cell. Upon construction it takes one object of each of the three mentioned categories. Later, it can be "re-initialized" for a concrete grid cell and then provides mapped quadrature points and weights, mapped shape function values and derivatives as well as some properties of the transformation from the reference cell to the actual mesh cell.
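The three-objects-in-one pattern described above is short in code; a minimal sketch (the FE_Q/QGauss/MappingQ1 choices and the dof_handler/cell_matrix variables are assumptions for illustration):

    FE_Q<2>      fe(1);
    QGauss<2>    quadrature(2);
    MappingQ1<2> mapping;
    FEValues<2>  fe_values(mapping, fe, quadrature,
                           update_gradients | update_JxW_values);

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        fe_values.reinit(cell); // mapped data for this concrete cell
        for (const unsigned int i : fe_values.dof_indices())
          for (const unsigned int j : fe_values.dof_indices())
            for (unsigned int q = 0; q < quadrature.size(); ++q)
              cell_matrix(i, j) += fe_values.shape_grad(i, q) *
                                   fe_values.shape_grad(j, q) *
                                   fe_values.JxW(q);
      }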

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 2024-01-30 03:04:48.692850089 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 2024-01-30 03:04:48.692850089 +0000 @@ -103,7 +103,7 @@ &#href_anchor"memitem:namespaceDifferentiation_1_1SD" id="r_namespaceDifferentiation_1_1SD">namespace  Differentiation::SD &#href_anchor"details" id="details">

    Detailed Description

    A module dedicated to the implementation of functions and classes that relate to automatic and symbolic differentiation.

    Below we provide a very brief introduction as to what automatic and symbolic differentiation are, what variations of these computational/numerical schemes exist, and how they are integrated within deal.II's framework. The purpose of all of these schemes is to automatically compute the derivative of functions, or approximations of it, in cases where one does not want to compute them by hand. Common examples in the finite element context are situations where one wants to solve a nonlinear problem that is given by requiring that some residual $F(u,\nabla u)=0$ where $F$ is a complicated function that needs to be differentiated to apply Newton's method; and situations where one is given a parameter dependent problem ${\cal A}(q,u,\nabla u) = f$ and wants to form derivatives with regards to the parameters $q$, for example to optimize an output functional with regards to $q$, or for a sensitivity analysis with regards to $q$. One should think of $q$ as design parameters: say, the width or shape of a wing, the stiffness coefficients of a material chosen to build an object, the power sent to a device, the chemical composition of the gases sent to a burner. In all of these cases, one should think of $F$ and $\cal A$ as complicated and cumbersome to differentiate – at least when doing it by hand. A relatively simple case of a nonlinear problem that already highlights the tedium of computing derivatives by hand is shown in step-15. However, in reality, one might, for example, think about problems such as chemically reactive flows where the fluid equations have coefficients such as the density and viscosity that depend strongly and nonlinearly on the chemical composition, temperature, and pressure of the fluid at each point; and where the chemical species react with each other based on reaction coefficients that also depend nonlinearly and in complicated ways on the chemical composition, temperature, and pressure. In many cases, the exact formulas for all of these coefficients can take several lines to write out, may include exponentials and (harmonic or geometric) averages of several nonlinear terms, and/or may contain table lookup of and interpolation between data points. Just getting these terms right is difficult enough; computing derivatives of these terms is impractical in most applications and, in reality, impossible to get right. Higher derivatives are even more impossible to do without computer aid. Automatic or symbolic differentiation is a way out of this: One only has to implement the function that computes these coefficients in terms of their inputs only once, and gets the (correct!) derivatives without further coding effort (though at a non-negligible computational cost either at run time, compile time, or both).

    Automatic differentiation

    Automatic differentiation (commonly also referred to as algorithmic differentiation), is a numerical method that can be used to "automatically" compute the first, and perhaps higher-order, derivatives of function(s) with respect to one or more input variables. Although this comes at a certain computational cost, the benefits to using such a tool may be significant. When used correctly the derivatives of often complicated functions can be computed to a very high accuracy. Although the exact accuracy achievable by these frameworks largely depends on their underlying mathematical formulation, some implementations compute with a precision on the order of machine accuracy. Note that this is different to classical numerical differentiation (using, for example, a finite difference approximation of a function by evaluating it at different points), which has an accuracy that depends on both the perturbation size as well as the chosen finite-difference scheme; the error of these methods is measurably larger than well-formulated automatic differentiation approaches.

    @@ -151,38 +151,38 @@
  • reverse-mode (or reverse accumulation) auto-differentiation.
  • As a point of interest, the optimal Jacobian accumulation, which performs a minimal set of computations, lies somewhere between these two limiting cases. Its computation for a general composite function remains an open problem in graph theory.

    With the aid of the diagram below (it and some of the listed details courtesy of this Wikipedia article), let us think about the representation of the calculation of the function $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ and its derivatives:

    [Figure: forward mode automatic differentiation]
    [Figure: reverse mode automatic differentiation]

    Specifically, we will briefly describe what forward and reverse auto-differentiation are. Note that in the diagram, along the edges of the graph in text are the directional derivative of function $w$ with respect to the $i$-th variable, represented by the notation $\dot{w} = \dfrac{d w}{d x_{i}}$. The specific computations used to render the function value and its directional derivatives for this example are tabulated in the source article. For a second illustrative example, we refer the interested reader to this article.

    Consider first that any composite function $f(x)$, here represented as having two independent variables, can be dissected into a composition of its elementary functions

\[
   f (\mathbf{x})
   = f_{0} \circ f_{1} \circ f_{2} \circ \ldots \circ f_{n} (\mathbf{x})
   \quad .
\]

    As was previously mentioned, if each of the primitive operations $f_{n}$ is smooth and differentiable, then the chain-rule can be universally employed to compute the total derivative of $f$, namely $\dfrac{d f(x)}{d \mathbf{x}}$. What distinguishes the "forward" from the "reverse" mode is how the chain-rule is evaluated, but ultimately both compute the total derivative

\[
   \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \dfrac{d f_{0}}{d f_{1}} \dfrac{d f_{1}}{d f_{2}} \dfrac{d f_{2}}{d f_{3}} \ldots \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}}
   \quad .
\]

    In forward-mode, the chain-rule is computed naturally from the "inside out". The independent variables are therefore fixed, and each sub-function $f'_{i} \vert_{f'_{i+1}}$ is computed recursively and its result returned as inputs to the parent function. Encapsulating and fixing the order of operations using parentheses, this means that we compute

\[
   \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \dfrac{d f_{0}}{d f_{1}} \left( \dfrac{d f_{1}}{d f_{2}} \left(\dfrac{d f_{2}}{d f_{3}} \left(\ldots \left( \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}} \right)\right)\right)\right)
   \quad .
\]

    The computational complexity of a forward-sweep is proportional to that of the input function. However, for each directional derivative that is to be computed one sweep of the computational graph is required.

    In reverse-mode, the chain-rule is computed somewhat unnaturally from the "outside in". The values of the dependent variables first get computed and fixed, and then the preceding differential operations are evaluated and multiplied in succession with the previous results from left to right. Again, if we encapsulate and fix the order of operations using parentheses, this implies that the reverse calculation is performed by

\[
   \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \left( \left( \left( \left( \left( \dfrac{d f_{0}}{d f_{1}} \right) \dfrac{d f_{1}}{d f_{2}} \right) \dfrac{d f_{2}}{d f_{3}} \right) \ldots \right) \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}} \right)
   \quad .
\]

    The intermediate values $\dfrac{d f_{i-1}}{d f_{i}}$ are known as adjoints, which must be computed and stored as the computational graph is traversed. However, for each dependent scalar function one sweep of the computational graph renders all directional derivatives at once.

    Overall, the efficiency of each mode is determined by the number of independent (input) variables and dependent (output) variables. If the outputs greatly exceed the inputs in number, then forward-mode can be shown to be more efficient than reverse-mode. The converse is true when the number of input variables greatly exceeds that of the output variables. This point may be used to help inform which number type is most suitable for which set of operations are to be performed using automatic differentiation. For example, in many applications for which second derivatives are to be computed it is appropriate to combine both reverse- and forward-modes. The former would then typically be used to calculate the first derivatives, and the latter the second derivatives.

    Supported automatic differentiation libraries

    @@ -330,7 +330,7 @@

    Symbolic expressions and differentiation

    Symbolic differentiation is, in terms of its design and usage, quite different to automatic differentiation. Underlying any symbolic library is a computer algebra system (CAS) that implements a language and collection of algorithms to manipulate symbolic (or "string-like") expressions. This is most similar, from a philosophical point of view, to how algebraic operations would be performed by hand.

    To help better distinguish between symbolic differentiation and numerical methods like automatic differentiation, let's consider a very simple example. Suppose that the function $f(x,y) = [2x+1]^{y}$, where $x$ and $y$ are variables that are independent of one another. By applying the chain-rule, the derivatives of this function are simply $\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ and $\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$. These are exactly the results that you get from a CAS after defining the symbolic variables x and y, defining the symbolic expression f = pow(2x+1, y) and computing the derivatives diff(f, x) and diff(f, y). At this point there is no assumption of what x and y represent; they may later be interpreted as plain (scalar) numbers, complex numbers, or something else for which the power and natural logarithm functions are well defined. Obviously this means that there is also no assumption about which point to evaluate either the expression or its derivatives. One could readily take the expression for $\dfrac{d f(x, y)}{d x}$ and evaluate it at $x=1, y=2.5$ and then later, with no recomputation of the derivative expression itself, evaluate it at $x=3.25, y=-6$. In fact, the interpretation of any symbolic variable or expression, and the inter-dependencies between variables, may be defined or redefined at any point during their manipulation; this leads to a degree of flexibility in computations that cannot be matched by auto-differentiation. For example, one could perform the permanent substitution $g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ and then recompute $g(x)$ for several different values of $x$. One could also post-factum express an interdependency between x and y, such as $y \rightarrow y(x) := 2x$. For such a case, this means that the initially computed derivatives $\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ and $\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ truly represent partial derivatives rather than total derivatives. Of course, if such an inter-dependency was explicitly defined before the derivatives $\dfrac{d f(x, y(x))}{d x}$ and $\dfrac{d f(x, y(x))}{d y}$ are computed, then this could correspond to the total derivative (which is the only result that auto-differentiation is able to achieve for this example).

    Due to the sophisticated CAS that forms the foundation of symbolic operations, the types of manipulations are not necessarily restricted to differentiation alone, but rather may span a spectrum of manipulations relevant to discrete differential calculus, topics in pure mathematics, and more. The documentation for the SymPy library gives plenty of examples that highlight what a fully-fledged CAS is capable of. Through the Differentiation::SD::Expression class, and the associated functions in the Differentiation::SD namespace, we provide a wrapper to the high-performance SymEngine symbolic manipulation library that has enriched operator overloading and a consistent interface that makes it easy and "natural" to use. In fact, this class can be used as a "drop-in" replacement for arithmetic types in many situations, transforming the operations from being numeric to symbolic in nature; this is made especially easy when classes are templated on the underlying number type. Being focused on numerical simulation of PDEs, the functionality of the CAS that is exposed within deal.II focuses on symbolic expression creation, manipulation, and differentiation.

    The convenience wrappers to SymEngine functionality are primarily focused on manipulations that solely involve dictionary-based (i.e., something reminiscent of "string-based") operations. Although SymEngine performs these operations in an efficient manner, they are still known to be computationally expensive, especially when the operations are performed on large expressions. It should therefore be expected that the performance of the parts of code that perform differentiation, symbolic substitution, etc., may be a limiting factor when using this in production code. deal.II therefore provides an interface to accelerate the evaluation of lengthy symbolic expression through the BatchOptimizer class (itself often leveraging functionality provided by SymEngine). In particular, the BatchOptimizer simultaneously optimizes a collection of symbolic expressions using methods such as common subexpression elimination (CSE), as well as by generating high performance code-paths to evaluate these expressions through the use of a custom-generated std::function or by compiling the expression using the LLVM JIT compiler. The usage of the Differentiation::SD::BatchOptimizer class is exemplified in step-71.

    As a final note, it is important to recognize the remaining major deficiencies in deal.II's current implementation of the interface to the supported symbolic library. The level of functionality currently implemented effectively limits the use of symbolic algebra to the traditional use case (i.e. scalar and tensor algebra, as might be useful to define constitutive relations or complex functions for application as boundary conditions or source terms). In fact, step-71 demonstrates how it can be used to implement challenging constitutive models. In the future we will also implement classes to assist in performing assembly operations in the same spirit as that which has been done in the Differentiation::AD namespace.

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 2024-01-30 03:04:48.764850690 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 2024-01-30 03:04:48.764850690 +0000 @@ -210,7 +210,7 @@
  • If you have boundary conditions that set a certain part of the solution's value, for example no normal flux, $\mathbf n \cdot \mathbf u=0$ (as happens in flow problems and is handled by the VectorTools::compute_no_normal_flux_constraints function) or prescribed tangential components, $\mathbf{n}\times\mathbf{u}=\mathbf{n}\times\mathbf{f}$ (as happens in electromagnetic problems and is handled by the VectorTools::project_boundary_values_curl_conforming function). For the former case, imagine for example that we are at a vertex where the normal vector has the form $\frac 1{\sqrt{14}} (1,2,3)^T$ and that the $x$-, $y$- and $z$-components of the flow field at this vertex are associated with degrees of freedom 12, 28, and 40. Then the no-normal-flux condition means that we need to have the condition $\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$. The prescribed tangential component leads to similar constraints though there is often something on the right hand side.
  • If you have hanging node constraints, for example in a mesh like this:
    @@ -303,7 +303,7 @@

    instead [1] (M. S. Shephard. Linear multipoint constraints applied via transformation as part of a direct stiffness assembly process. International Journal for Numerical Methods in Engineering 20(11):2107-2112, 1985).

    Here, $A$ is a given (unconstrained) system matrix for which we only assume that we can apply it to a vector but can not necessarily access individual matrix entries. $b$ is the corresponding right hand side of a system of linear equations $A\,x=b$. The matrix $C$ describes the homogeneous part of the linear constraints stored in an AffineConstraints object and the vector $k$ is the vector of corresponding inhomogeneities. More precisely, the AffineConstraints::distribute() operation applied on a vector $x$ is the operation

    \[
      x \leftarrow C\,x+k.
    \]
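    In code, this is the step one performs after solving the condensed linear system; a typical (hypothetical) sequence looks like:

    AffineConstraints<double> constraints;
    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
    constraints.close();

    // ... assemble, e.g. via AffineConstraints::distribute_local_to_global(),
    // and solve the constrained system ...
    solver.solve(system_matrix, solution, system_rhs, preconditioner);

    // Apply x <- C x + k: overwrite the constrained entries with the values
    // implied by the unconstrained ones plus the inhomogeneities.
    constraints.distribute(solution);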
    @@ -370,7 +370,7 @@

    Compute which entries of a matrix built on the given dof_handler may possibly be nonzero, and create a sparsity pattern object that represents these nonzero locations.

    This function computes the possible positions of non-zero entries in the global system matrix by simulating which entries one would write to during the actual assembly of a matrix. For this, the function assumes that each finite element basis function is non-zero on a cell only if its degree of freedom is associated with the interior, a face, an edge or a vertex of this cell. As a result, a matrix entry $A_{ij}$ that is computed from two basis functions $\varphi_i$ and $\varphi_j$ with (global) indices $i$ and $j$ (for example, using a bilinear form $A_{ij}=a(\varphi_i,\varphi_j)$) can be non-zero only if these shape functions correspond to degrees of freedom that are defined on at least one common cell. Therefore, this function just loops over all cells, figures out the global indices of all degrees of freedom, and presumes that all matrix entries that couple any of these indices will result in a nonzero matrix entry. These will then be added to the sparsity pattern. As this process of generating the sparsity pattern does not take into account the equation to be solved later on, the resulting sparsity pattern is symmetric.

    This algorithm makes no distinction between shape functions on each cell, i.e., it simply couples all degrees of freedom on a cell with all other degrees of freedom on a cell. This is often the case, and always a safe assumption. However, if you know something about the structure of your operator and that it does not couple certain shape functions with certain test functions, then you can get a sparser sparsity pattern by calling a variant of the current function described below that allows you to specify which vector components couple with which other vector components.

    The method described above lives on the assumption that coupling between degrees of freedom only happens if shape functions overlap on at least one cell. This is the case with most usual finite element formulations involving conforming elements. However, for formulations such as the Discontinuous Galerkin finite element method, the bilinear form contains terms on interfaces between cells that couple shape functions that live on one cell with shape functions that live on a neighboring cell. The current function would not see these couplings, and would consequently not allocate entries in the sparsity pattern. You would then get into trouble during matrix assembly because you try to write into matrix entries for which no space has been allocated in the sparsity pattern. This can be avoided by calling the DoFTools::make_flux_sparsity_pattern() function instead, which takes into account coupling between degrees of freedom on neighboring cells.

    There are other situations where bilinear forms contain non-local terms, for example in treating integral equations. These require different methods for building the sparsity patterns that depend on the exact formulation of the problem. You will have to do this yourself then.
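    For the common conforming case, the use of this function therefore follows a standard pattern (a sketch; variable names are illustrative):

    DynamicSparsityPattern dsp(dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern(dof_handler, dsp);

    // For DG-type bilinear forms with face terms, one would instead call
    //   DoFTools::make_flux_sparsity_pattern(dof_handler, dsp);

    SparsityPattern sparsity_pattern;
    sparsity_pattern.copy_from(dsp);
    system_matrix.reinit(sparsity_pattern);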

    @@ -436,13 +436,13 @@

    This function is a simple variation on the previous make_sparsity_pattern() function (see there for a description of all of the common arguments), but it provides functionality for vector finite elements that allows you to be more specific about which variables couple in which equation.

    For example, if you wanted to solve the Stokes equations,

    \begin{align*}
      -\Delta \mathbf u + \nabla p &= 0,\\
      \text{div}\ u &= 0
    \end{align*}

    in two space dimensions, using stable Q2/Q1 mixed elements (using the FESystem class), then you don't want all degrees of freedom to couple in each equation. More specifically, in the first equation, only $u_x$ and $p$ appear; in the second equation, only $u_y$ and $p$ appear; and in the third equation, only $u_x$ and $u_y$ appear. (Note that this discussion only talks about vector components of the solution variable and the different equations, and has nothing to do with degrees of freedom, or in fact with any kind of discretization.) We can describe this by the following pattern of "couplings":

    \[
      \left[
      \begin{array}{ccc}
        1 & 0 & 1 \\
        0 & 1 & 1 \\
        1 & 1 & 0
      \end{array}
      \right]
    \]

    where "1" indicates that two variables (i.e., vector components of the FESystem) couple in the respective equation, and a "0" means no coupling. These zeros imply that upon discretization via a standard finite element formulation, we will not write entries into the matrix that, for example, couple pressure test functions with pressure shape functions (and similar for the other zeros above). It is then a waste to allocate memory for these entries in the matrix and the sparsity pattern, and you can avoid this by creating a mask such as the one above that describes this to the (current) function that computes the sparsity pattern. As stated above, the mask shown above refers to components of the composed FESystem, rather than to degrees of freedom or shape functions.

    This function is designed to accept a coupling pattern, like the one shown above, through the couplings parameter, which contains values of type Coupling. It builds the matrix structure just like the previous function, but does not create matrix elements if not specified by the coupling pattern. If the couplings are symmetric, then so will be the resulting sparsity pattern.
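    For the Stokes mask shown above, setting up such a coupling table might look as follows (a sketch for dim = 2 with components ordered as $u_x$, $u_y$, $p$):

    // Row i, column j: does variable j appear in equation i?
    Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
    coupling.fill(DoFTools::none);
    coupling[0][0] = DoFTools::always; // eq. 1: u_x ...
    coupling[0][2] = DoFTools::always; // ... and p
    coupling[1][1] = DoFTools::always; // eq. 2: u_y ...
    coupling[1][2] = DoFTools::always; // ... and p
    coupling[2][0] = DoFTools::always; // eq. 3: u_x ...
    coupling[2][1] = DoFTools::always; // ... and u_y

    DoFTools::make_sparsity_pattern(dof_handler, coupling, dsp);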

    @@ -757,9 +757,9 @@
    LinearOperator< Range, Domain, Payload > distribute_constraints_linear_operator(const AffineConstraints< typename Range::value_type > &constraints, const LinearOperator< Range, Domain, Payload > &exemplar)

    and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

    \[
      (C^T A C + Id_c) x = C^T (b - A\,k)
    \]

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom module.
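    In practice, the modified system can be set up and solved along these lines (a sketch assuming a SparseMatrix<double> A, vectors x and b, and a closed AffineConstraints object; the PackagedOperation returned by constrained_right_hand_side() is assigned to a plain vector as in the LinearOperator documentation):

    #include <deal.II/lac/constrained_linear_operator.h>

    const auto op_A    = linear_operator(A);
    const auto op_Amod = constrained_linear_operator(constraints, op_A);
    Vector<double> rhs_mod =
      constrained_right_hand_side(constraints, op_A, b);

    SolverControl            control(1000, 1e-12);
    SolverCG<Vector<double>> solver(control);
    solver.solve(op_Amod, x, rhs_mod, PreconditionIdentity());

    // Recover the constrained degrees of freedom: x <- C x + k.
    constraints.distribute(x);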

    @@ -795,9 +795,9 @@

    with

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

    \[
      (C^T A C + Id_c) x = C^T (b - A\,k)
    \]

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom module.

    @@ -1155,27 +1155,27 @@

    This function is an updated version of the project_boundary_values_curl_conforming function. The intention is to fix a problem when using the previous function in conjunction with non-rectangular geometries (i.e. elements with non-rectangular faces). The L2-projection method used has been taken from the paper "Electromagnetic scattering simulation using an H (curl) conforming hp-finite element method in three dimensions" by PD Ledger, K Morgan and O Hassan (Int. J. Num. Meth. Fluids, Volume 53, Issue 8, pages 1267-1296).

    This function will compute constraints that correspond to Dirichlet boundary conditions of the form $\vec{n}\times\vec{E}=\vec{n}\times\vec{F}$, i.e., the tangential components of $\vec{E}$ and $\vec{F}$ shall coincide.

    Computing constraints

    To compute the constraints we use a projection method based upon the paper mentioned above. In 2d this is done in a single stage for the edge-based shape functions, regardless of the order of the finite element. In 3d this is done in two stages, edges first and then faces.

    For each cell, each edge, $e$, is projected by solving the linear system $Ax=b$ where $x$ is the vector of constraints on degrees of freedom on the edge and

    $A_{ij} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{s}_{j}\cdot\vec{t}) dS$

    $b_{i} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{F}\cdot\vec{t}) dS$

    with $\vec{s}_{i}$ the $i^{th}$ shape function and $\vec{t}$ the tangent vector.

    Once all edge constraints, $x$, have been computed, we may compute the face constraints in a similar fashion, taking into account the residuals from the edges.

    For each face on the cell, $f$, we solve the linear system $By=c$ where $y$ is the vector of constraints on degrees of freedom on the face and

    $B_{ij} = \int_{f} (\vec{n} \times \vec{s}_{i}) \cdot (\vec{n} \times \vec{s}_{j}) dS$

    $c_{i} = \int_{f} (\vec{n} \times \vec{r}) \cdot (\vec{n} \times \vec{s}_i) dS$

    and $\vec{r} = \vec{F} - \sum_{e \in f} \sum_{i \in e} x_{i}\vec{s}_i$, the edge residual.

    The resulting constraints are then given in the solutions $x$ and $y$.

    If the AffineConstraints constraints contained values or other constraints before, the new ones are added or the old ones overwritten, if a node of the boundary part to be used was already in the list of constraints. This is handled by using inhomogeneous constraints. Please note that when combining adaptive meshes and this kind of constraints, the Dirichlet conditions should be set first, and then completed by hanging node constraints, in order to make sure that the discretization remains consistent. See the discussion on conflicting constraints in the module on Constraints on degrees of freedom.

    Arguments to this function

    This function is explicitly for use with FE_Nedelec elements, or with FESystem elements which contain FE_Nedelec elements. It will throw an exception if called with any other finite element. The user must ensure that FESystem elements are correctly set up when using this function, as this check is not possible in this case.

    The second argument of this function denotes the first vector component of the finite element which corresponds to the vector function that you wish to constrain. For example, if we are solving Maxwell's equations in 3d and have components $(E_x,E_y,E_z,B_x,B_y,B_z)$ and we want the boundary conditions $\vec{n}\times\vec{B}=\vec{n}\times\vec{f}$, then first_vector_component would be 3. The boundary_function must return 6 components in this example, with the first 3 corresponding to $\vec{E}$ and the second 3 corresponding to $\vec{B}$. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. $x$-, $y$-, and finally $z$-component.

    The parameter boundary_component corresponds to the number boundary_id of the face. numbers::internal_face_boundary_id is an illegal value, since it is reserved for interior faces.

    The last argument is used to compute the normal vector $\vec{n}$ at the boundary points.

    See also
    Glossary entry on boundary indicators
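    Put together, a call for the 3d Maxwell example with components $(E_x,E_y,E_z,B_x,B_y,B_z)$ might look like the following sketch (the argument order follows the description above; compare with the signature in your deal.II version):

    AffineConstraints<double> constraints;
    // Constrain n x B = n x f on boundary id 0; B starts at component 3.
    VectorTools::project_boundary_values_curl_conforming_l2(
      dof_handler,
      /*first_vector_component=*/3,
      boundary_function, // must return all 6 components
      /*boundary_id=*/0,
      constraints,
      mapping);
    constraints.close();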
    @@ -1258,11 +1258,11 @@ const Mapping< dim > & mapping

    Compute constraints that correspond to boundary conditions of the form $\vec{n}^T\vec{u}=\vec{n}^T\vec{f}$, i.e. the normal components of the solution $u$ and a given $f$ shall coincide. The function $f$ is given by boundary_function and the resulting constraints are added to constraints for faces with boundary indicator boundary_component.

    This function is explicitly written for use with the FE_RaviartThomas elements. Thus it throws an exception if it is called with other finite elements.

    If the AffineConstraints object constraints contained values or other constraints before, the new ones are added or the old ones overwritten, if a node of the boundary part to be used was already in the list of constraints. This is handled by using inhomogeneous constraints. Please note that when combining adaptive meshes and this kind of constraints, the Dirichlet conditions should be set first, and then completed by hanging node constraints, in order to make sure that the discretization remains consistent. See the discussion on conflicting constraints in the module on Constraints on degrees of freedom.

    The argument first_vector_component denotes the first vector component in the finite element that corresponds to the vector function $\vec{u}$ that you want to constrain. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e., $x$-, $y$-, and finally $z$-component.

    The parameter boundary_component corresponds to the boundary_id of the faces where the boundary conditions are applied. numbers::internal_face_boundary_id is an illegal value, since it is reserved for interior faces. The mapping is used to compute the normal vector $\vec{n}$ at the boundary points.

    Computing constraints

    To compute the constraints we use the interpolation operator proposed in Brezzi, Fortin (Mixed and Hybrid Finite Element Methods, Springer, 1991) on every face located at the boundary.

    See also
    Glossary entry on boundary indicators
    @@ -1351,16 +1351,16 @@
    This function computes the constraints that correspond to boundary conditions of the form $\vec u \cdot \vec n=\vec u_\Gamma \cdot \vec n$, i.e., normal flux constraints where $\vec u$ is a vector-valued solution variable and $\vec u_\Gamma$ is a prescribed vector field whose normal component we want to be equal to the normal component of the solution. This function can also be used on level meshes in the multigrid method if refinement_edge_indices and level are provided, and the former can be obtained by MGConstrainedDoFs::get_refinement_edge_indices(). These conditions have exactly the form handled by the AffineConstraints class, in that they relate a linear combination of boundary degrees of freedom to a corresponding value (the inhomogeneity of the constraint). Consequently, the current function creates a list of constraints that are written into an AffineConstraints container. This object may already have some content, for example from hanging node constraints, that remains untouched. These constraints have to be applied to the linear system like any other such constraints, i.e., you have to condense the linear system with the constraints before solving, and you have to distribute the solution vector afterwards.

    This function treats a more general case than VectorTools::compute_no_normal_flux_constraints() (which can only handle the case where $\vec u_\Gamma \cdot \vec n = 0$, and is used in step-31 and step-32). However, because everything that would apply to that function also applies as a special case to the current function, the following discussion is relevant to both.

    Note
    This function doesn't make much sense in 1d, so it throws an exception if dim equals one.

    Arguments to this function

    The second argument of this function denotes the first vector component in the finite element that corresponds to the vector function that you want to constrain. For example, if we were solving a Stokes equation in 2d and the finite element had components $(u,v,p)$, then first_vector_component needs to be zero if you intend to constrain the vector $(u,v)^T \cdot \vec n = \vec u_\Gamma \cdot \vec n$. On the other hand, if we solved the Maxwell equations in 3d and the finite element has components $(E_x,E_y,E_z,B_x,B_y,B_z)$ and we want the boundary condition $\vec B\cdot \vec n=\vec B_\Gamma\cdot \vec n$, then first_vector_component would be 3. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. $x$-, $y$-, and finally $z$-component. The function assumes, but can't check, that the vector components in the range [first_vector_component,first_vector_component+dim) come from the same base finite element. For example, in the Stokes example above, it would not make sense to use a FESystem<dim>(FE_Q<dim>(2), 1, FE_Q<dim>(1), dim) (note that the first velocity vector component is a $Q_2$ element, whereas all the other ones are $Q_1$ elements) as there would be points on the boundary where the $x$-velocity is defined but no corresponding $y$- or $z$-velocities.

    The third argument denotes the set of boundary indicators on which the boundary condition is to be enforced. Note that, as explained below, this is one of the few functions where it makes a difference whether we call the function multiple times with only one boundary indicator, or whether we call the function once with the whole set of boundary indicators at once.

    Argument four (function_map) describes the boundary function $\vec u_\Gamma$ for each boundary id. The function function_map[id] is used on boundary with id id taken from the set boundary_ids. Each function in function_map is expected to have dim components, which are used independent of first_vector_component.

    The mapping argument is used to compute the boundary points at which the function needs to request the normal vector $\vec n$ from the boundary description.

    Note
    When combining adaptively refined meshes with hanging node constraints and boundary conditions like from the current function within one AffineConstraints object, the hanging node constraints should always be set first, and then the boundary conditions since boundary conditions are not set in the second operation on degrees of freedom that are already constrained. This makes sure that the discretization remains conforming as is needed. See the discussion on conflicting constraints in the module on Constraints on degrees of freedom.

    Computing constraints in 2d

    Computing these constraints requires some smarts. The main question revolves around what the normal vector is. Consider the following situation:

    @@ -1368,23 +1368,23 @@
    Here, we have two cells that use a bilinear mapping (i.e., MappingQ(1)). Consequently, for each of the cells, the normal vector is perpendicular to the straight edge. If the two edges at the top and right are meant to approximate a curved boundary (as indicated by the dashed line), then neither of the two computed normal vectors are equal to the exact normal vector (though they approximate it as the mesh is refined further). What is worse, if we constrain $\vec u \cdot \vec n= \vec u_\Gamma \cdot \vec n$ at the common vertex with the normal vector from both cells, then we constrain the vector $\vec u$ with respect to two linearly independent vectors; consequently, the constraint would be $\vec u=\vec u_\Gamma$ at this point (i.e. all components of the vector), which is not what we wanted.

    To deal with this situation, the algorithm works in the following way: at each point where we want to constrain $\vec u$, we first collect all normal vectors that adjacent cells might compute at this point. We then do not constrain $\vec u \cdot \vec n=\vec u_\Gamma \cdot \vec n$ for each of these normal vectors but only for the average of the normal vectors. In the example above, we therefore record only a single constraint $\vec u \cdot \vec {\bar n}=\vec u_\Gamma \cdot \vec {\bar n}$, where $\vec {\bar n}$ is the average of the two indicated normal vectors.

    Unfortunately, this is not quite enough. Consider the situation here:

    If again the top and right edges approximate a curved boundary, and the left boundary a separate boundary (for example straight) so that the exact boundary has indeed a corner at the top left vertex, then the above construction would not work: here, we indeed want to constrain $\vec u$ completely at this point (because the normal velocities with respect to both the left normal as well as the top normal vector should be zero), not just require that the velocity in the direction of the average normal vector is zero.

    Consequently, we use the following heuristic to determine whether all normal vectors computed at one point are to be averaged: if two normal vectors for the same point are computed on different cells, then they are to be averaged. This covers the first example above. If they are computed from the same cell, then the fact that they are different is considered indication that they come from different parts of the boundary that might be joined by a real corner, and must not be averaged.

    There is one problem with this scheme. If, for example, the same domain we have considered above, is discretized with the following mesh, then we get into trouble:

    Here, the algorithm assumes that the boundary does not have a corner at the point where faces $F1$ and $F2$ join because at that point there are two different normal vectors computed from different cells. If you intend for there to be a corner of the exact boundary at this point, the only way to deal with this is to assign the two parts of the boundary different boundary indicators and call this function twice, once for each boundary indicator; doing so will yield only one normal vector at this point per invocation (because we consider only one boundary part at a time), with the result that the normal vectors will not be averaged. This situation also needs to be taken into account when using this function around reentrant corners on Cartesian meshes. If normal-flux boundary conditions are to be enforced on non-Cartesian meshes around reentrant corners, one may even get cycles in the constraints as one will in general constrain different components from the two sides. In that case, set a no-slip constraint on the reentrant vertex first.

    Computing constraints in 3d

    The situation is more complicated in 3d. Consider the following case where we want to compute the constraints at the marked vertex:

    @@ -1498,7 +1498,7 @@
    This function does the same as the compute_nonzero_normal_flux_constraints() function (see there for more information), but for the simpler case of homogeneous normal-flux constraints, i.e., for imposing the condition $\vec u \cdot \vec n= 0$. This function is used in step-31 and step-32. This function can also be used on level meshes in the multigrid method if refinement_edge_indices and level are provided, and the former can be obtained by MGConstrainedDoFs::get_refinement_edge_indices().

    See also
    Glossary entry on boundary indicators
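    For the homogeneous case the invocation is short; note that, as discussed above, hanging node constraints are entered first (a sketch with illustrative names):

    AffineConstraints<double> constraints;
    DoFTools::make_hanging_node_constraints(dof_handler, constraints);

    // u . n = 0 on boundary ids 0 and 1; velocities start at component 0.
    const std::set<types::boundary_id> boundary_ids = {0, 1};
    VectorTools::compute_no_normal_flux_constraints(
      dof_handler, /*first_vector_component=*/0, boundary_ids,
      constraints, mapping);

    constraints.close();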
    /usr/share/doc/packages/dealii/doxygen/deal.II/group__feaccess.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__feaccess.html 2024-01-30 03:04:48.788850889 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__feaccess.html 2024-01-30 03:04:48.792850923 +0000 @@ -226,7 +226,7 @@ update_quadrature_points

    Transformed quadrature points.

    Compute the quadrature point locations in real cell coordinates.

    FEValues objects take the quadrature point locations on the reference cell as an argument of the constructor (via the Quadrature object). For most finite elements, knowing the location of quadrature points on the reference cell is all that is necessary to evaluate shape functions, evaluate the mapping, and other things. On the other hand, if you want to evaluate a right hand side function $f(\mathbf x_q)$ at quadrature point locations $\mathbf x_q$ on the real cell, you need to pass this flag to the FEValues constructor to make sure you can later access them.

    There are contexts other than FEValues (and related classes) that take update flags. An example is the DataPostprocessor class (and derived classes). In these cases, the update_quadrature_points flag is generally understood to update the location of "evaluation points", i.e., the physical locations of the points at which the solution is evaluated. As a consequence, the flag is misnamed in these contexts: No quadrature (i.e., computation of integrals) is involved, and consequently what is being updated is, in the context of DataPostprocessor, the member variable DataPostprocessorInputs::CommonInputs::evaluation_points.
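    For example, evaluating a right hand side $f(\mathbf x_q)$ during assembly requires exactly this flag (an illustrative sketch; fe, quadrature, and rhs_function are assumed to exist):

    FEValues<dim> fe_values(fe,
                            quadrature,
                            update_values | update_JxW_values |
                              update_quadrature_points);

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        fe_values.reinit(cell);
        for (const unsigned int q : fe_values.quadrature_point_indices())
          {
            // Physical location of the q-th quadrature point:
            const Point<dim> &x_q = fe_values.quadrature_point(q);
            const double      f_q = rhs_function.value(x_q);
            // ... accumulate f_q * phi_i(x_q) * JxW(q) into the local rhs ...
          }
      }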

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 2024-01-30 03:04:48.804851023 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 2024-01-30 03:04:48.804851023 +0000 @@ -117,7 +117,7 @@ fe_collection.push_back (FE_Q<dim>(degree));

    This way, one can add elements of polynomial degree 1 through 4 to the collection. It is not necessary to retain the added object: the collection makes a copy of it; it does not merely store a pointer to the given finite element object. This same observation also holds for the other collection classes.

    It is customary that within an hp-finite element program, one keeps collections of finite elements and quadrature formulas with the same number of elements, each element of the one collection matching the element in the other. This is not necessary, but it often makes coding a lot simpler. If a collection of mappings is used, the same holds for hp::MappingCollection objects as well.
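    Keeping the two collections aligned then looks like this (a sketch extending the push_back call shown above):

    hp::FECollection<dim> fe_collection;
    hp::QCollection<dim>  quadrature_collection;

    for (unsigned int degree = 1; degree <= 4; ++degree)
      {
        fe_collection.push_back(FE_Q<dim>(degree));
        // One matching quadrature rule per element of the FE collection:
        quadrature_collection.push_back(QGauss<dim>(degree + 1));
      }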

    Whenever p-adaptivity is considered in an hp-finite element program, a hierarchy of finite elements needs to be established to determine succeeding finite elements for refinement and preceding ones for coarsening. Typically, this hierarchy considers how finite element spaces are nested: for example, a $Q_1$ element describes a sub-space of a $Q_2$ element, and so doing $p$ refinement usually means using a larger (more accurate) finite element space. In other words, the hierarchy of finite elements is built by considering whether some elements of the collection are sub- or super-spaces of others.

    By default, we assume that finite elements are stored in an ascending order based on their polynomial degree. If the order of elements differs, a corresponding hierarchy needs to be supplied to the collection via the hp::FECollection::set_hierarchy() member function.

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__manifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__manifold.html 2024-01-30 03:04:48.836851289 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__manifold.html 2024-01-30 03:04:48.836851289 +0000 @@ -239,7 +239,7 @@

    So why does this matter? After all, the last two meshes describe the exact same domain and we know that upon mesh refinement we obtain the correct solution regardless of the choice of cells, as long as the diameter of the largest cell goes to zero.

    There are two answers to this question. First, the numerical effort of solving a partial differential equation to a certain accuracy typically depends on the quality of cells since the constant $C$ in error estimates of the form $\|u-u_h\|_{H^1} \le Ch^p \|u\|_{H^{p+1}}$ depends on factors such as the maximal ratio of radii of the smallest circumscribed to largest inscribed circle over all cells (for triangles; or a suitable generalization for other types of cells). Thus, it is worthwhile creating meshes with cells that are as well-formed as possible. This is arguably not so much of an issue for the meshes shown above, but is sometimes an issue. Consider, for example, the following code and mesh:

    const Point<2> center (1,0);
    const SphericalManifold<2> manifold(center);
    const double inner_radius = 0.5,
                 outer_radius = 1.0;
    @@ -290,22 +290,22 @@
    See also
    Glossary entry on manifold indicators

    Computing the weights for combining different manifold descriptions

    In a realistic application, it happens regularly that different manifold descriptions need to be combined. The simplest case is when a curved description is only available for the boundary but not for the interior of the computational domain. The manifold description for a ball also falls into this category, as it needs to combine a spherical manifold at the circular part with a straight-sided description in the center of the domain where the spherical manifold is not valid.

    In general, the process of blending different manifold descriptions in deal.II is achieved by the so-called transfinite interpolation. Its formula in 2D is, for example, described on Wikipedia. Given a point $(u,v)$ on a chart, the image of this point in real space is given by

    \begin{align*}
    \mathbf S(u,v) &= (1-v)\mathbf c_0(u)+v \mathbf c_1(u) + (1-u)\mathbf c_2(v) + u \mathbf c_3(v) \\
    &\quad - \left[(1-u)(1-v) \mathbf x_0 + u(1-v) \mathbf x_1 + (1-u)v \mathbf x_2 + uv \mathbf x_3 \right]
    \end{align*}

    where $\bf x_0, \bf x_1, \bf x_2, \bf x_3$ denote the four vertices bounding the image space and $\bf c_0, \bf c_1, \bf c_2, \bf c_3$ are the four curves describing the lines of the cell.

    If we want to find the center of the cell according to the manifold (that is also used when the grid is refined), the chart is the unit cell $(0,1)^2$ and we want to evaluate this formula in the point $(u,v) = (0.5, 0.5)$. In that case, $\mathbf c_0(0.5)$ is the position of the midpoint of the lower face (indexed by 2 in deal.II's ordering) that is derived from its own manifold, $\mathbf c_1(0.5)$ is the position of the midpoint of the upper face (indexed by 3 in deal.II), $\mathbf c_2(0.5)$ is the midpoint of the face on the left (indexed by 0), and $\mathbf c_3(0.5)$ is the midpoint of the right face. In this formula, the weights equate to $\frac{1}{2}$ for the four midpoints in the faces and to $-\frac{1}{4}$ for the four vertices. These weights look weird at first sight because the vertices enter with negative weight, but the mechanism does what we want: In case of a cell with curved description on two opposite faces but straight lines on the other two faces, the negative weights of $-\frac{1}{4}$ in the vertices balance with the center of the two straight lines in radial direction that get weight $\frac{1}{2}$. Thus, the average is taken over the two center points in curved direction, exactly placing the new point in the middle.

    In three spatial dimensions, the weights are $+\frac{1}{2}$ for the face midpoints, $-\frac{1}{4}$ for the line midpoints, and $\frac{1}{8}$ for the vertices, again balancing the different entities. In case all the surrounding of a cell is straight, the formula reduces to the obvious weight $\frac{1}{8}$ on each of the eight vertices.

    In the MappingQGeneric class, a generalization of this concept to the support points of a polynomial representation of curved cells, the nodes of the Gauss-Lobatto quadrature, is implemented by evaluating the boundary curves in the respective Gauss-Lobatto points $(u_i,v_i)$ and combining them with the above formula. The weights have been verified to yield optimal convergence rates $\mathcal O(h^{k+1})$ also for very high polynomial degrees, say $k=10$.

    In the literature, other boundary descriptions are also used. Before version 9.0 deal.II used something called Laplace smoothing where the weights that are applied to the nodes on the circumference to get the position of the interior nodes are determined by solving a Laplace equation on the unit element. However, this led to boundary layers close to the curved description, i.e., singularities in the higher derivatives of the mapping from unit to real cell.

    If the transition from a curved boundary description to a straight description in the interior is done wrong, it is typically impossible to achieve high order convergence rates. For example, the Laplace smoothing inside a single cell leads to a singularity in the fourth derivative of the mapping from the reference to the real cell, limiting the convergence rate to 3 in the cells at the boundary (and 3.5 if global L2 errors were measured in 2D). Other more crude strategies, like completely ignoring the presence of two different manifolds and simply computing the additional points of a high-order mapping in a straight coordinate system, could lead to even worse convergence rates. The current implementation in deal.II, on the other hand, has been extensively verified in this respect and should behave optimally.

    A bad strategy for blending a curved boundary representation with flat interior representations obviously also reflects mesh quality. For example, the above case with only 3 circumferential cells leads to the following mesh with Laplace manifold smoothing rather than the interpolation from the boundary as is implemented in deal.II:
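    The transfinite interpolation described above is available as a manifold class of its own; a typical setup is the following sketch (the manifold ids 0 and 1 and the curved_manifold object are illustrative):

    // Curved description on the boundary, transfinite blending inside.
    triangulation.set_all_manifold_ids(1);
    triangulation.set_all_manifold_ids_on_boundary(0);
    triangulation.set_manifold(0, curved_manifold);

    TransfiniteInterpolationManifold<dim> transfinite_manifold;
    transfinite_manifold.initialize(triangulation);
    triangulation.set_manifold(1, transfinite_manifold);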

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 2024-01-30 03:04:48.856851456 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 2024-01-30 03:04:48.856851456 +0000 @@ -166,7 +166,7 @@
    A class that implements a polynomial mapping $Q_p$ of degree $p$ on all cells. This class is completely equivalent to the MappingQ class and is there for backward compatibility.

    Definition at line 702 of file mapping_q.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__matrixfree.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__matrixfree.html 2024-01-30 03:04:48.880851656 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__matrixfree.html 2024-01-30 03:04:48.880851656 +0000 @@ -142,7 +142,7 @@
  • Matrix-free methods skip the storage of big global sparse matrices and compute the underlying weak forms on the fly. Since the memory transfer, i.e., the speed at which the data can be read from RAM, is the bottleneck for matrix-based computations rather than the actual arithmetic done using this data, a matrix-free evaluation that reads less data can be advantageous even if it does more computations. This concept builds upon a trend in computer architecture which is best described by the term memory wall, saying that compute performance has increased more rapidly than memory performance. Thus, a certain share of arithmetic operations is essentially for free, and this share has become larger during the last twenty years. This has enabled the radical algorithmic switch from a matrix-based to a matrix-free implementation of matrix-vector products for iterative solvers, besides their classical use in explicit time integration. Of course, the implementation must be efficient and there cannot be an excess of computations to make it a win in total. The deal.II library uses SIMD vectorization and highly optimized kernels based on templates of the polynomial degree to achieve this goal. To give a perspective, a sparse matrix-vector product for quadratic elements FE_Q used to be about as fast as the matrix-free implementation on processors designed around 2005-2007 (e.g. Pentium 4 or AMD Opteron Barcelona with 2-4 cores per chip). By 2018, the matrix-free evaluation is around eight times as fast (measured on Intel Skylake Server, 14 cores).
  • Matrix-free methods have a better complexity per degree of freedom as the degree is increased, due to sum factorization. The work per degree of freedom increases as $\mathcal O(k)$ in the degree $k$ for matrix-free schemes, whereas it increases as $\mathcal O(k^d)$ for matrix-based methods. This gives higher order schemes an edge. A particularly nice feature in matrix-free evaluation is that the $\mathcal O(1)$ terms often dominate, so it appears that higher order methods are as fast in terms of evaluation time as low order ones, when they have the same number of degrees of freedom. For the implementation in deal.II, best throughput is typically achieved for polynomial degrees between three and six.

    To summarize, matrix-free computations are the way to go for higher order elements (where higher order means everything except linear shape functions), for explicit time stepping (step-48), and for iterative solvers where preconditioning can also be done in a matrix-free way, as demonstrated in the step-37 and step-59 tutorial programs.

    The matrix-free evaluation infrastructure


    The motivation for the FEEvaluationAccess classes is to allow for specializations of the value and gradient access of interpolated solution fields depending on the number of components. The base class FEEvaluationBase returns the gradient as a Tensor<1,n_components,Tensor<1,dim,VectorizedArray<Number>>>, with the outer tensor going over the components and the inner tensor going through the dim components of the gradient. For a scalar field, i.e., n_components=1, we can skip the outer tensor and simply use Tensor<1,dim,VectorizedArray<Number>> as the gradient type. Likewise, for a system with n_components=dim, the appropriate format for the gradient is Tensor<2,dim,VectorizedArray<Number>>.
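    As an illustration of these access types, a minimal cell-integration sketch for a scalar Laplace operator might look as follows (a sketch only: matrix_free, src, dst, cell_range, dim, and fe_degree are assumed to be set up elsewhere; for n_components=1 the gradient returned below is a Tensor<1,dim,VectorizedArray<double>>):

    FEEvaluation<dim, fe_degree> phi (matrix_free);
    for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
      {
        phi.reinit (cell);
        // read vector entries and interpolate gradients to quadrature points
        phi.gather_evaluate (src, EvaluationFlags::gradients);
        for (unsigned int q = 0; q < phi.n_q_points; ++q)
          phi.submit_gradient (phi.get_gradient (q), q); // scalar case: Tensor<1,dim,...>
        // test by gradients of shape functions and write back to the vector
        phi.integrate_scatter (EvaluationFlags::gradients, dst);
      }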

    The FEFaceEvaluation class

    Face integrals, like those for inhomogeneous Neumann conditions in continuous FEM or for the large class of discontinuous Galerkin schemes, require the evaluation of quantities at the quadrature points of a face, besides the cell integrals. The facilities for face evaluation are mostly shared with FEEvaluation, in the sense that FEFaceEvaluation also inherits from FEEvaluationAccess. All data fields regarding the degrees of freedom and shape functions can be reused, the latter because all information consists of 1D shape data anyway. With respect to the mapping data, however, a specialization is used because the data is of structdim=dim-1. As a consequence, FEEvaluationAccess and FEEvaluationBase are given a template argument is_face in order to hold pointers to the cell and face mapping information, respectively. Besides access to the function values with FEEvaluationAccess::get_value() or gradients with FEEvaluationAccess::get_gradient(), the face evaluator also enables access to the normal vector via FEEvaluationAccess::normal_vector() and to a specialized field FEEvaluationAccess::get_normal_derivative(), which returns the derivative of the solution field normal to the face. This quantity is computed as the gradient (in real space) multiplied by the normal vector. The combination of the gradient and normal vector is typical of many (simple) second-order elliptic equations, such as the discretization of the Laplacian with the interior penalty method. If the gradient alone is not needed, the combined operation significantly reduces the data access, because only dim data entries for normal * Jacobian per quadrature point are necessary, as opposed to dim^2 fields for the Jacobian and dim fields for the normal when accessing them individually.

    An important optimization for the computation of face integrals is to think about the amount of vector data that must be accessed to evaluate the integrals on a face. Think for example of the case of FE_DGQ, i.e., Lagrange polynomials that have some of their nodes on the element boundary. For evaluation of the function values, only $(k+1)^{d-1}$ degrees of freedom contribute via a non-zero basis function, whereas the rest of the $(k+1)^d$ basis functions evaluate to zero on that boundary. Since vector access is one of the bottlenecks in matrix-free computations, the access to the vector should be restricted to the interesting entries. To enable this setup, the method FEFaceEvaluation::gather_evaluate() (and FEFaceEvaluation::integrate_scatter() for the integration equivalent) combines the vector access with the interpolation to the quadrature points. There exist two specializations, including the aforementioned "non-zero" value case, which is stored as the field internal::MatrixFreeFunctions::ShapeInfo::nodal_at_cell_boundaries. A similar property is also possible for the case where only the value and the first derivative of a selected number of basis functions evaluate to nonzero on a face. The associated element type is FE_DGQHermite and the decision is stored on the property internal::MatrixFreeFunctions::tensor_symmetric_hermite. The decision on whether such an optimized kernel can be used is made automatically inside FEFaceEvaluation::gather_evaluate() and FEFaceEvaluation::integrate_scatter(). It might seem inefficient to make this decision for every integration task, but in the end this is a single if statement (conditional jump) that is easily predictable for a modern CPU, as the decision is always the same inside an integration loop. (One only pays in somewhat increased compile times, because the compiler needs to generate code for all paths.)
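    To make the combined access-and-interpolate step concrete, an interior-face loop might be sketched like this (a sketch only, not code from this page; matrix_free, face_range, src, dst, fe_degree, and the penalty factor sigma are assumptions):

    FEFaceEvaluation<dim, fe_degree> phi_m (matrix_free, /*is_interior_face=*/true);
    for (unsigned int face = face_range.first; face < face_range.second; ++face)
      {
        phi_m.reinit (face);
        // vector access restricted to the relevant (k+1)^{d-1} entries where possible
        phi_m.gather_evaluate (src, EvaluationFlags::values | EvaluationFlags::gradients);
        for (unsigned int q = 0; q < phi_m.n_q_points; ++q)
          {
            const auto u    = phi_m.get_value (q);
            const auto dn_u = phi_m.get_normal_derivative (q); // (grad u) . n
            phi_m.submit_value (sigma * u - dn_u, q);
          }
        phi_m.integrate_scatter (EvaluationFlags::values, dst);
      }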

    The data storage through the MatrixFree class

    The tasks performed by FEEvaluation and FEFaceEvaluation can be split into the three categories: index access into vectors, evaluation and integration on the unit cell, and operation on quadrature points including the geometry evaluation. This split is reflected by the major data fields contained by MatrixFree, using internal::MatrixFreeFunctions::DoFInfo, internal::MatrixFreeFunctions::ShapeInfo, and internal::MatrixFreeFunctions::MappingInfo for each of these three categories, respectively. Their design principles and internal layout are described in the following subsections.

    The main interface that all these data structures adhere to is that integration tasks are broken down into a range of cells or faces that one can index into by a single integer index. The information about an integer range for the cell integrals, inner face integrals, and boundary integrals is provided by the class internal::MatrixFreeFunctions::TaskInfo, using the data fields cell_partition_data, face_partition_data, and boundary_partition_data. This class also contains information about subranges of indices for scheduling tasks in parallel using threads, and a grouping of the index range within {cell,face,boundary}_partition_data for interleaving cell and face integrals such that the access to vector entries for cell and face integrals re-uses data already in caches.
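    In user code, these integer ranges are what a MatrixFree loop hands to the local worker; a minimal sketch (class and variable names here are assumptions for illustration):

    // the worker receives one of the integer subranges described above
    void local_apply (const MatrixFree<dim, double>                    &data,
                      LinearAlgebra::distributed::Vector<double>       &dst,
                      const LinearAlgebra::distributed::Vector<double> &src,
                      const std::pair<unsigned int, unsigned int>      &cell_range) const
    {
      FEEvaluation<dim, fe_degree> phi (data); // fe_degree: assumed compile-time constant
      for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
        {
          // evaluate and integrate on this cell, as in the FEEvaluation sketch above
        }
    }

    // threads and the interleaving of cell and face work are then scheduled by
    // MatrixFree itself when calling, e.g.:
    // matrix_free.cell_loop (&Operator::local_apply, this, dst, src);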

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 2024-01-30 03:04:48.900851823 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 2024-01-30 03:04:48.900851823 +0000 @@ -169,7 +169,7 @@

    From the examples above, it is obvious that if we encounter a cell that cannot be added to the cells which have already been entered, we cannot usually point to a cell that is the culprit and would have to be entered in a different orientation. Furthermore, even if we knew which cell it was, there might be a large number of cells that would then cease to fit into the grid and for which we would have to find a different orientation as well (in the second example above, if we rotated cell 1, then we would have to rotate the cells 1 through N-1 as well).

    A brute force approach to this problem is the following: if cell N can't be added, then try to rotate cell N-1. If we can't rotate cell N-1 any more, then try to rotate cell N-2 and try to add cell N with all orientations of cell N-1. And so on. Algorithmically, we can visualize this as a tree structure, where node N has as many children as there are possible orientations of node N+1 (in two space dimensions, there are four orientations in which each cell can be constructed from its four vertices; for example, if the vertex indices are (0 1 3 2), then the four possibilities would be (0 1 3 2), (1 3 2 0), (3 2 0 1), and (2 0 1 3)). When adding one cell after the other, we traverse this tree in a depth-first (pre-order) fashion. When we find that one path from the root (cell 0) to a leaf (the last cell) is not allowed (i.e. that the orientations of the cells which are encoded in the path through the tree do not lead to a valid triangulation), we have to track back and try another path through the tree.

    In practice, of course, we do not follow each path to a final node and then find out whether a path leads to a valid triangulation, but rather use an inductive argument: if for all previously added cells the triangulation is a valid one, then we can find out whether a path through the tree can yield a valid triangulation by checking whether entering the present cell would introduce any faces that have a nonunique direction; if that is so, then we can stop following all paths below this point and track back immediately.
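    In pseudocode form, the depth-first search with this inductive consistency check might be sketched as follows (illustrative only, not deal.II's actual implementation; faces_consistent_up_to is a hypothetical helper implementing the check described above):

    // returns true if cells n, n+1, ... can be oriented consistently,
    // given the orientations already fixed for cells 0 .. n-1
    bool orient_from (std::vector<unsigned int> &orientation, const unsigned int n)
    {
      if (n == orientation.size())
        return true;                           // all cells placed consistently
      for (unsigned int o = 0; o < 4; ++o)     // four 2d orientations per cell
        {
          orientation[n] = o;
          if (faces_consistent_up_to (orientation, n) && // inductive check
              orient_from (orientation, n + 1))
            return true;                       // keep this path through the tree
        }
      return false;                            // no orientation works: track back
    }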

    Nevertheless, it is already obvious that the tree has $4^N$ leaves in two space dimensions, since each of the $N$ cells can be added in four orientations. Most of these nodes can be discarded rapidly, since firstly the orientation of the first cell is irrelevant, and secondly if we add one cell that has a neighbor that has already been added, then there are already only two possible orientations left, so the total number of checks we have to make until we find a valid way is significantly smaller than $4^N$. However, the algorithm is still exponential in time and linear in memory (we only have to store the information for the present path in form of a stack of orientations of cells that have already been added).

    In fact, the two examples above show that the exponential estimate is not a pessimistic one: in those examples we indeed have to track back to one of the very first cells to find a way to add all cells in a consistent fashion.

    This discouraging situation is greatly improved by the fact that we have an alternative algorithm for 2d that is always linear in runtime (discovered and implemented by Michael Anderson of TICAM, University of Texas, in 2003), and that for 3d we can find an algorithm that in practice is usually only roughly linear in time and memory. We will describe these algorithms in the following. A full description and theoretical analysis is given in [AABB17] .

    The 2d linear complexity algorithm

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html 2024-01-30 03:04:48.940852156 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html 2024-01-30 03:04:48.940852156 +0000 @@ -312,7 +312,7 @@
    parallel::transform
    void transform(const InputIterator &begin_in, const InputIterator &end_in, OutputIterator out, const Function &function, const unsigned int grainsize)
    Definition parallel.h:148

    In this example, we used a lambda expression to construct, on the fly, a function object that takes two arguments and returns the sum of the two. This is exactly what we need when we want to add the individual elements of vectors $x$ and $y$ and write the sum of the two into the elements of $z$. The function object that we get here is completely known to the compiler, and when it expands the loop that results from parallel::transform it will be as if we had written the loop in its obvious form:

    InputIterator1 in_1 = x.begin();
    InputIterator2 in_2 = y.begin();
    OutputIterator out = z.begin();
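    // (the diff output truncates the snippet here; given the text above,
    //  the loop presumably continues in the obvious form:)
    for (; in_1 != x.end(); ++in_1, ++in_2, ++out)
      *out = *in_1 + *in_2;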
    void apply_to_subranges(const Iterator &begin, const std_cxx20::type_identity_t< Iterator > &end, const Function &f, const unsigned int grainsize)
    Definition parallel.h:435

    Here, we call the vmult_on_subrange function on sub-ranges of at least 200 elements each, so that the initial setup cost can amortize.
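    The call that the diff elides presumably looks roughly like this (a sketch; vmult_on_subrange is the worker named in the text, and the captured src/dst vectors are assumptions):

    parallel::apply_to_subranges (0U, n_rows(),
                                  [&](const unsigned int begin,
                                      const unsigned int end)
                                  { vmult_on_subrange (begin, end, src, dst); },
                                  200);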

    A related operation is when the loops over elements each produce a result that must then be accumulated (reduction operations other than addition of numbers would work as well). An example is to form the matrix norm $x^T M x$ (it really is only a norm if $M$ is positive definite, but let us assume for a moment that it is). A sequential implementation would look like this for sparse matrices:

    double SparseMatrix::mat_norm (const Vector &x) const
    {
    const double *val_ptr = &values[0];
    const unsigned int *colnum_ptr = &colnums[0];
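      // (snippet truncated by the diff; a sketch of how the sequential loop
      //  plausibly continues, assuming CSR-style values/colnums/rowstart arrays
      //  and an n_rows member as suggested by the pointers above:)
      double norm_sqr = 0;
      for (unsigned int row = 0; row < n_rows; ++row)
        {
          double s = 0;
          const double *const val_end_of_row = &values[rowstart[row + 1]];
          while (val_ptr != val_end_of_row)
            s += *val_ptr++ * x(*colnum_ptr++);
          norm_sqr += x(row) * s; // accumulate the row's contribution to x^T M x
        }
      return std::sqrt (norm_sqr);
    }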

  • The last issue that is worth addressing is that, the way we wrote the MyClass::assemble_on_one_cell function above, we create and destroy an FEValues object every time the function is called, i.e. once for each cell in the triangulation. That is an immensely expensive operation because the FEValues class tries to do a lot of work in its constructor in an attempt to reduce the number of operations we have to do on each cell (i.e. it increases the constant in the ${\cal O}(1)$ effort to initialize such an object in order to reduce the constant in the ${\cal O}(N)$ operations to call FEValues::reinit on the $N$ cells of a triangulation). Creating and destroying an FEValues object on each cell invalidates this effort.

    The way to avoid this is to put the FEValues object into a second structure that will hold scratch data, and initialize it in the constructor:

    struct PerTaskData {
    FullMatrix<double> cell_matrix;
    Vector<double> cell_rhs;
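    };

    // (the diff truncates the example here; the scratch object that the text
    //  describes would, following the usual WorkStream pattern, look roughly
    //  like this -- a sketch, not necessarily the exact code from the page:)
    struct ScratchData {
      FEValues<dim> fe_values;

      ScratchData (const FiniteElement<dim> &fe,
                   const Quadrature<dim>    &quadrature,
                   const UpdateFlags         update_flags)
        : fe_values (fe, quadrature, update_flags)
      {}
    };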
    /usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html 2024-01-30 03:04:48.972852423 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html 2024-01-30 03:04:48.972852423 +0000 @@ -281,8 +281,8 @@

    The vector $U$ above indeed has four components. We note that we could change the ordering of the solution components $\textbf u$ and $p$ inside $U$ if we also changed the columns of the matrix operator accordingly.

    Next, we need to think about test functions $V$. We want to multiply both sides of the equation with them, then integrate over $\Omega$. The result should be a scalar equality. We can achieve this by choosing $V$ also vector valued as

    \begin{eqnarray*}
   V =
   \left(
     \begin{array}{c}
       \textbf v \\ q
     \end{array}
   \right)
 \end{eqnarray*}

  • These views can then be asked for information about these individual components. For example, when you write fe_values[pressure].value(i,q) you get the value of the pressure component of the $i$th shape function $V_i$ at the $q$th quadrature point. Because the extractor pressure represents a scalar component, the result of the operator fe_values[pressure].value(i,q) is a scalar number. On the other hand, the call fe_values[velocities].value(i,q) would produce the value of a whole set of dim components, which would be of type Tensor<1,dim>.
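    A minimal sketch of the extractor mechanics described here (component indices assumed for a Stokes-like element with dim velocity components followed by one pressure):

    const FEValuesExtractors::Vector velocities (0);
    const FEValuesExtractors::Scalar pressure (dim);

    // scalar extractor: a plain double
    const double        p_value = fe_values[pressure].value (i, q);
    // vector extractor: a whole set of dim components at once
    const Tensor<1,dim> u_value = fe_values[velocities].value (i, q);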

  • So if, again, this is not the code we use in step-8, what do we do there? The answer rests on the finite element we use. In step-8, we use the following element:

    FESystem<dim> finite_element (FE_Q<dim>(1), dim);

    In other words, the finite element we use consists of dim copies of the same scalar element. This is what we call a primitive element: an element that may be vector-valued but where each shape function has exactly one non-zero component. In other words: if the $x$-component of a displacement shape function is nonzero, then the $y$- and $z$-components must be zero and similarly for the other components. What this means is that derived quantities based on shape functions also inherit this sparsity property. For example: the divergence $\mathrm{div}\ \Phi(x,y,z)=\partial_x\varphi_x(x,y,z) + \partial_y\varphi_y(x,y,z) + \partial_z\varphi_z(x,y,z)$ of a vector-valued shape function $\Phi(x,y,z)=(\varphi_x(x,y,z), \varphi_y(x,y,z), \varphi_z(x,y,z))^T$ is, in the present case, either $\mathrm{div}\ \Phi(x,y,z)=\partial_x\varphi_x(x,y,z)$, $\mathrm{div}\ \Phi(x,y,z)=\partial_y\varphi_y(x,y,z)$, or $\mathrm{div}\ \Phi(x,y,z)=\partial_z\varphi_z(x,y,z)$, because exactly one of the $\varphi_\ast$ is nonzero. Knowing this means that we can save a number of computations that, if we were to do them, would only yield zeros to add up.

    In a similar vein, if only one component of a shape function is nonzero, then only one row of its gradient $\nabla\Phi$ is nonzero. What this means for terms like $(\mu \nabla\Phi_i,\nabla\Phi_j)$, where the scalar product between two tensors is defined as $(\tau, \gamma)_\Omega=\int_\Omega \sum_{i,j=1}^d \tau_{ij} \gamma_{ij}$, is that the term is only nonzero if both tensors have their nonzero entries in the same row, which means that the two shape functions have to have their single nonzero component in the same location.

    If we use this sort of knowledge, then we can in a first step avoid computing gradient tensors if we can determine up front that their scalar product will be zero, in a second step avoid building the entire tensors and only compute their nonzero components, and in a final step simplify the scalar product by only considering that index $i$ for the one nonzero row, rather than multiplying and adding up zeros.

    The vehicle for all this is the ability to determine which vector component is going to be nonzero. This information is provided by the FiniteElement::system_to_component_index function. What can be done with it, using the example above, is explained in detail in step-8.
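    A short sketch of how this information is typically used (the step-8-style assembly loop around it is assumed):

    // which single component of shape functions i and j is nonzero?
    const unsigned int comp_i = fe.system_to_component_index (i).first;
    const unsigned int comp_j = fe.system_to_component_index (j).first;
    if (comp_i == comp_j)
      {
        // only in this case can a term like (mu grad Phi_i, grad Phi_j)
        // be nonzero, so only now do we compute and add the contribution
      }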

    Block solvers

    Using techniques like those shown above, it is not particularly complicated to assemble the linear system, i.e. the matrix and right hand side, for a vector-valued problem. However, it then also has to be solved. This is more complicated. Naively, one could just consider the matrix as a whole. For most problems, this matrix is not going to be definite (except for special cases like the elasticity equations covered in step-8 and step-17). It will often also not be symmetric. This rather general class of matrices presents problems for iterative solvers: the lack of structural properties prevents the use of the most efficient methods and preconditioners. While it can be done, the solution process will therefore most often be slower than necessary.


    where $M$ represents the mass matrix that results from discretizing the identity operator $\mathbf 1$ and $B$ the equivalent of the gradient operator.

    By default, this is not what happens, however. Rather, deal.II assigns numbers to degrees of freedom in a rather random manner. Consequently, if you form a vector out of the values of the degrees of freedom, they will not be neatly ordered in a vector like

    \begin{eqnarray*}
   \left(
     \begin{array}{c}
       U \\ P
     \end{array}
   \right).
 \end{eqnarray*}

    @@ -655,8 +655,8 @@

    \begin{eqnarray*}
   MU = F-BP.
 \end{eqnarray*}

    This has the advantage that the matrices $B^TM^{-1}B$ and $M$ that we have to solve with are both symmetric and positive definite, as opposed to the large whole matrix we had before.

    How a solver like this is implemented is explained in more detail in step-20, step-31, and a few other tutorial programs. What we would like to point out here is that we now need a way to extract certain parts of a matrix or vector: if we are to multiply, say, the $U$ part of the solution vector by the $M$ part of the global matrix, then we need to have a way to access these parts of the whole.

    This is where the BlockVector, BlockSparseMatrix, and similar classes come in. For all practical purposes, they can be used as regular vectors or sparse matrices, i.e. they offer element access, provide the usual vector operations and implement, for example, matrix-vector multiplications. In other words, assembling matrices and right hand sides works in exactly the same way as for the non-block versions. That said, internally they store the elements of vectors and matrices in "blocks"; for example, instead of using one large array, the BlockVector class stores its data as a set of arrays each of which we call a block. The advantage is that, while the whole thing can be used as a vector, one can also access an individual block which then, again, is a vector with all the vector operations.

    To show how to do this, let us consider the second equation $MU=F-BP$ to be solved above. This can be achieved using the following sequence similar to what we have in step-20:

    Vector<double> tmp (solution.block(0).size());
    system_matrix.block(0,1).vmult (tmp, solution.block(1));
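    // (the diff truncates the snippet here; based on the description below,
    //  it presumably continues along these lines -- the CG solver and the
    //  identity preconditioner are assumptions for this sketch:)
    tmp *= -1;
    tmp += system_rhs.block(0);

    SolverControl            solver_control (1000, 1e-12);
    SolverCG<Vector<double>> cg (solver_control);
    cg.solve (system_matrix.block(0,0), solution.block(0), tmp,
              PreconditionIdentity());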

    What's happening here is that we allocate a temporary vector with as many elements as the first block of the solution vector, i.e. the velocity component $U$, has. We then set this temporary vector equal to the $(0,1)$ block of the matrix, i.e. $B$, times component 1 of the solution, which is the previously computed pressure $P$. The result is multiplied by $-1$, and component 0 of the right hand side, $F$, is added to it. The temporary vector now contains $F-BP$. The rest of the code snippet simply solves a linear system with $F-BP$ as right hand side and the $(0,0)$ block of the global matrix, i.e. $M$. Using block vectors and matrices in this way therefore allows us to quite easily write rather complicated solvers making use of the block structure of a linear system.

    Extracting data from solutions

    Once one has computed a solution, it is often necessary to evaluate it at quadrature points, for example to evaluate nonlinear residuals for the next Newton iteration, to evaluate the finite element residual for error estimators, or to compute the right hand side for the next time step in a time dependent problem.

    The way this is done is to again use an FEValues object to evaluate the shape functions at quadrature points, and with those also the values of a finite element function. For the example of the mixed Laplace problem above, consider the following code after solving:

    std::vector<Vector<double> > local_solution_values (n_q_points,
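                                                         Vector<double>(dim+1));
    // (continuation reconstructed as a sketch; the diff cut the snippet here.
    //  dim+1 solution components are assumed, matching the mixed Laplace example)
    fe_values.get_function_values (solution, local_solution_values);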
    /usr/share/doc/packages/dealii/doxygen/deal.II/index.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/index.html 2024-01-30 03:04:48.992852589 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/index.html 2024-01-30 03:04:48.992852589 +0000 @@ -119,7 +119,7 @@
  • DoFHandler: DoFHandler objects are the confluence of triangulations and finite elements: the finite element class describes how many degrees of freedom it needs per vertex, line, or cell, and the DoFHandler class allocates this space so that each vertex, line, or cell of the triangulation has the correct number of them. It also gives them a global numbering.


    A different viewpoint is this: While the mesh and finite element describe abstract properties of the finite dimensional space $V_h$ in which we seek the discrete solution, the DoFHandler classes enumerate a concrete basis of this space so that we can represent the discrete solution as $u_h(\mathbf x)= \sum_j U_j \varphi_j(\mathbf x)$ by an ordered set of coefficients $U_j$.

    Just as with triangulation objects, most operations on DoFHandlers are done by looping over all cells and doing something on each or a subset of them. The interfaces of the two classes are therefore rather similar: they allow one to get iterators to the first and last cell (or face, or line, etc.) and offer information through these iterators. The information that can be obtained from these iterators is the geometric and topological information that can already be obtained from the triangulation iterators (they are in fact derived classes) as well as things like the global numbers of the degrees of freedom on the present cell. One can also ask an iterator to extract the values corresponding to the degrees of freedom on the present cell from a data vector that stores values for all degrees of freedom associated with a triangulation.

    It is worth noting that, just like triangulations, DoFHandler classes do not know anything about the mapping from the unit cell to its individual cells. They are also ignorant of the shape functions that correspond to the degrees of freedom they manage: all they know is that there are, for example, 2 degrees of freedom for each vertex and 4 per cell interior. Nothing about their specifics is relevant to the DoFHandler class with the exception of the fact that they exist.

    The DoFHandler class and its associates are described in the Degrees of Freedom module. In addition, there are specialized versions that can handle multilevel and hp-discretizations. These are described in the Multilevel support and hp-finite element support modules. Finite element methods frequently imply constraints on degrees of freedom, such as for hanging nodes or nodes at which boundary conditions apply; dealing with such constraints is described in the Constraints on degrees of freedom module.

    /usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html 2024-01-30 03:04:49.008852722 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html 2024-01-30 03:04:49.008852722 +0000 @@ -146,7 +146,7 @@
  • Create and return an index set of size $N$ that contains every single index within this range. In essence, this function returns an index set created by

    IndexSet is (N);
    is.add_range(0, N);

    This function exists so that one can create and initialize index sets that are complete in one step, or so one can write code like

    if (my_index_set == complete_index_set(my_index_set.size()))
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 2024-01-30 03:04:49.024852855 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 2024-01-30 03:04:49.024852855 +0000 @@ -132,11 +132,11 @@

    Check if data on all children match, and return value of the first child.

    \[
   d_{K_p} = d_{K_c}
   \qquad
   \forall K_c \text{ children of } K_p
 \]


    Return sum of data on all children.

    \[
   d_{K_p} = \sum d_{K_c}
   \qquad
   \forall K_c \text{ children of } K_p
 \]

    This strategy preserves the $l_1$-norm of the corresponding global data vector before and after adaptation.

    Return the $l_2$-norm of data on all children.

    \[
   d_{K_p}^2 = \sum d_{K_c}^2
   \qquad
   \forall K_c \text{ children of } K_p
 \]

    This strategy preserves the $l_2$-norm of the corresponding global data vector before and after adaptation.


    Return mean value of data on all children.

    \[
   d_{K_p} = \sum d_{K_c} / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
 \]


    Return maximum value of data on all children.

    \[
   d_{K_p} = \max \left( d_{K_c} \right)
   \qquad
   \forall K_c \text{ children of } K_p
 \]

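    These strategies are typically consumed as callbacks when transferring cell data across refinement; a hypothetical usage sketch (the exact parallel::distributed::CellDataTransfer constructor signature is an assumption here, not quoted from this page):

    parallel::distributed::CellDataTransfer<dim, dim, std::vector<double>>
      cell_data_transfer (triangulation,
                          /*transfer_variable_size_data=*/false,
                          &AdaptationStrategies::Refinement::preserve<dim, dim, double>,
                          &AdaptationStrategies::Coarsening::sum<dim, dim, double>);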
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 2024-01-30 03:04:49.040852989 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 2024-01-30 03:04:49.040852989 +0000 @@ -128,11 +128,11 @@

    Return a vector containing copies of data of the parent cell for each child.

    \[
   d_{K_c} = d_{K_p}
   \qquad
   \forall K_c \text{ children of } K_p
 \]


    Return a vector which contains data of the parent cell being equally divided among all children.

    \[
   d_{K_c} = d_{K_p} / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
 \]

    This strategy preserves the $l_1$-norm of the corresponding global data Vector before and after adaptation.


    Return a vector which contains squared data of the parent cell being equally divided among the squares of all children.

    \[
   d_{K_c}^2 = d_{K_p}^2 / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
 \]

    This strategy preserves the $l_2$-norm of the corresponding global data Vector before and after adaptation.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 2024-01-30 03:04:49.052853089 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 2024-01-30 03:04:49.052853089 +0000 @@ -115,7 +115,7 @@

    The members of this enum are used to describe the logical interpretation of what the various components of a vector-valued data set mean. For example, if one has a finite element for the Stokes equations in 2d, representing components $(u,v,p)$, one would like to indicate that the first two, $u$ and $v$, represent a logical vector so that later on when we generate graphical output we can hand them off to a visualization program that will automatically know to render them as a vector field, rather than as two separate and independent scalar fields.

    This can be achieved by passing a set of enums of the current kind to the DataOut_DoFData::add_data_vector functions.

    See the step-22 tutorial program for an example on how this information can be used in practice.
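    In practice this typically looks like the following (a sketch along the lines of step-22, assuming a solution with dim velocity components followed by one pressure, and a dof_handler set up elsewhere):

    std::vector<std::string> names (dim, "velocity");
    names.emplace_back ("pressure");

    std::vector<DataComponentInterpretation::DataComponentInterpretation>
      interpretation (dim, DataComponentInterpretation::component_is_part_of_vector);
    interpretation.push_back (DataComponentInterpretation::component_is_scalar);

    DataOut<dim> data_out;
    data_out.attach_dof_handler (dof_handler);
    data_out.add_data_vector (solution, names,
                              DataOut<dim>::type_dof_data, interpretation);
    data_out.build_patches ();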

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html 2024-01-30 03:04:49.104853522 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html 2024-01-30 03:04:49.104853522 +0000 @@ -540,7 +540,7 @@

    While this discussion applies to two spatial dimensions, it is more complicated in 3d. The reason is that we could still use patches, but it is difficult when trying to visualize them: if we use a cut through the data (by, for example, using x- and z-coordinates and a fixed y-value) and plot function values in the z-direction, then the patched data is not a patch in the sense GNUPLOT wants it any more. Therefore, we use another approach, namely writing the data on the 3d grid as a sequence of lines, i.e. two points each associated with one or more data sets. There are therefore 12 lines for each subcell of a patch.

    Given the lines as described above, a cut through this data in Gnuplot can then be achieved like this:

       set data style lines
       splot [:][:][0:] "T" using 1:2:($3==.5 ? $4 : -1)

    This command plots data in $x$- and $y$-direction unbounded, but in $z$-direction only those data points which are above the $x$-$y$-plane (we assume here a positive solution; if it has negative values, you might want to decrease the lower bound). Furthermore, it only takes the data points with z-values ($3) equal to 0.5, i.e. a cut through the domain at z=0.5. For the data points on this plane, the data values of the first data set ($4) are raised in z-direction above the x-y-plane; all other points are assigned the value -1 instead of the value of the data vector and are not plotted due to the lower bound in z plotting direction, given in the third pair of brackets.

    More complex cuts are possible, including nonlinear ones. Note however, that only those points which are actually on the cut-surface are plotted.

    Definition at line 3557 of file data_out_base.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html 2024-01-30 03:04:49.124853689 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html 2024-01-30 03:04:49.128853722 +0000 @@ -120,17 +120,17 @@

    Detailed Description

    This namespace provides functions that compute a cell-wise approximation of the norm of a derivative of a finite element field by taking difference quotients between neighboring cells. This is a rather simple but efficient way to get an error indicator, since it can be computed with relatively little numerical effort and yet gives a reasonable approximation.

    The way the difference quotients are computed on cell $K$ is the following (here described for the approximation of the gradient of a finite element field, but see below for higher derivatives): let $K'$ be a neighboring cell, and let $y_{K'}=x_{K'}-x_K$ be the distance vector between the centers of the two cells, then $ \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| }$ is an approximation of the directional derivative $ \nabla u(x_K) \cdot \frac{y_{K'}}{ \|y_{K'}\| }.$ By multiplying both terms by $\frac{y_{K'}}{ \|y_{K'}\| }$ from the left and summing over all neighbors $K'$, we obtain $ \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|} \frac{y_{K'}^T}{ \|y_{K'}\| } \right) \nabla u(x_K) \approx \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|} \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| } \right).$

    Thus, if the matrix $ Y = \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \frac{y_{K'}^T}{ \|y_{K'}\| } \right)$ is regular (which is the case when the vectors $y_{K'}$ to all neighbors span the whole space), we can obtain an approximation to the true gradient by $ \nabla u(x_K) \approx Y^{-1} \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| } \right).$ This is a quantity that is easily computed. The value returned for each cell when calling the approximate_gradient function of this class is the $l_2$ norm of this approximation to the gradient. To make this a useful quantity, you may want to scale each element by the correct power of the respective cell size.

    The computation of this quantity must fail if a cell has only neighbors for which the direction vectors $y_K$ do not span the whole space, since then the matrix $Y$ is no longer invertible. If this happens, you will get an error similar to this one:

    --------------------------------------------------------
    An error occurred in line <749>
    of file <source/numerics/derivative_approximation.cc> in function
    void DerivativeApproximation::approximate(...)

    As can easily be verified, this can only happen on very coarse grids, when some cells and all their neighbors have not been refined even once. You should therefore only call the functions of this class if all cells have been refined at least once. In practice this is not much of a restriction.

    Approximation of higher derivatives

    Similar to the reasoning above, approximations to higher derivatives can be computed in a similar fashion. For example, the tensor of second derivatives is approximated by the formula $ \nabla^2 u(x_K) \approx Y^{-1} \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \otimes \frac{\nabla u_h(x_{K'}) - \nabla u_h(x_K)}{ \|y_{K'}\| } \right), $ where $\otimes$ denotes the outer product of two vectors. Note that unlike the true tensor of second derivatives, its approximation is not necessarily symmetric. This is due to the fact that in the derivation, it is not clear whether we shall consider as projected second derivative the term $\nabla^2 u y_{KK'}$ or $y_{KK'}^T \nabla^2 u$. Depending on which choice we take, we obtain one approximation of the tensor of second derivatives or its transpose. To avoid this ambiguity, we take the symmetrized form as the result, which is the mean value of the approximation and its transpose.

    The returned value on each cell is the spectral norm of the approximated tensor of second derivatives, i.e. the largest eigenvalue by absolute value. This equals the largest curvature of the finite element field at each cell, and the spectral norm is the matrix norm associated to the $l_2$ vector norm.

    Even higher derivatives than the second can be obtained along the same lines as exposed above.

    Refinement indicators based on the derivatives

    If you would like to base a refinement criterion upon these approximations of the derivatives, you will have to scale the results of this class by an appropriate power of the mesh width. For example, since $\|u-u_h\|^2_{L_2} \le C h^2 \|\nabla u\|^2_{L_2}$, it might be the right thing to scale the indicators as $\eta_K = h \|\nabla u\|_K$, i.e. $\eta_K = h^{1+d/2} \|\nabla u\|_{\infty;K}$, i.e. the right power is $1+d/2$.

    Likewise, for the second derivative, one should choose a power of the mesh size $h$ one higher than for the gradient.

    Implementation

    The formulae for the computation of approximations to the gradient and to the tensor of second derivatives shown above are very much alike. The basic difference is that in one case the finite difference quotient is a scalar, while in the other case it is a vector. For higher derivatives, this would be a tensor of even higher rank. We then have to form the outer product of this difference quotient with the distance vector $y_{KK'}$, symmetrize it, contract it with the matrix $Y^{-1}$ and compute its norm. To make the implementation simpler and to allow for code reuse, all these operations that are dependent on the actual order of the derivatives to be approximated, as well as the computation of the quantities entering the difference quotient, have been separated into auxiliary nested classes (named Gradient and SecondDerivative), and the main algorithm is simply passed one or the other data type and asks them to perform the order-dependent operations. The main framework that is independent of this, such as finding all active neighbors or setting up the matrix $Y$, is done in the main function approximate.

    Due to this way of operation, the class may easily be extended to higher order derivatives than are presently implemented. Basically, only an additional class along the lines of the derivative descriptor classes Gradient and SecondDerivative has to be implemented, with the respective aliases and functions replaced by the appropriate analogues for the derivative that is to be approximated.

    Function Documentation


    This function is the analogue to the one above, computing finite difference approximations of the tensor of second derivatives. Pass it the DoF handler object that describes the finite element field, a nodal value vector, and receive the cell-wise spectral norm of the approximated tensor of second derivatives. The spectral norm is the matrix norm associated to the $l_2$ vector norm.

    The last parameter denotes the solution component, for which the gradient is to be computed. It defaults to the first component. For scalar elements, this is the only valid choice; for vector-valued ones, any component between zero and the number of vector components can be given here.

    In a parallel computation the solution vector needs to contain the locally relevant unknowns.
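    To make the use of these functions concrete, here is a minimal sketch (not part of the documentation above; dof_handler and solution stand for a previously set up DoFHandler and solution vector):

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/lac/vector.h>
    #include <deal.II/numerics/derivative_approximation.h>

    using namespace dealii;

    template <int dim>
    void compute_indicators(const DoFHandler<dim> &dof_handler,
                            const Vector<double>  &solution)
    {
      // One entry per active cell, as these functions expect.
      Vector<float> gradient_norms(
        dof_handler.get_triangulation().n_active_cells());
      Vector<float> second_derivative_norms(
        dof_handler.get_triangulation().n_active_cells());

      // Cell-wise Euclidean norm of the approximated gradient (component 0).
      DerivativeApproximation::approximate_gradient(dof_handler,
                                                    solution,
                                                    gradient_norms);

      // Cell-wise spectral norm of the approximated tensor of second
      // derivatives.
      DerivativeApproximation::approximate_second_derivative(
        dof_handler, solution, second_derivative_norms);
    }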

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 2024-01-30 03:04:49.244854689 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 2024-01-30 03:04:49.244854689 +0000 @@ -643,7 +643,7 @@
    Return a symbolic number that represents the Euler constant $e \approx 2.71828$ raised to the given exponent.

    Mimics the function std::exp(exponent) using the standard math library.

    Definition at line 60 of file symengine_math.cc.
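    As a brief illustration (a sketch, assuming only the Expression class and the factory function documented elsewhere on this page):

    #include <deal.II/differentiation/sd.h>

    namespace SD = dealii::Differentiation::SD;

    void exp_example()
    {
      // e raised to a symbolic exponent x.
      const SD::Expression x      = SD::make_symbol("x");
      const SD::Expression e_to_x = SD::exp(x);
    }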

    @@ -2507,7 +2507,7 @@

    Return an Expression representing a scalar symbolic variable with the identifier specified by symbol.

    For example, if the symbol is the string "x" then the scalar symbolic variable that is returned represents the scalar $x$.

    Parameters
    @@ -2540,7 +2540,7 @@
    [in] symbol An identifier (or name) for the returned symbolic variable.

    Return an Expression representing a scalar symbolic function with the identifier specified by symbol. The function's symbolic dependencies are specified by the input arguments.

    For example, if the symbol is the string "f", and the arguments to the function that is generated are the symbolic variable x and the symbolic expression y+z, then the generic symbolic function that is returned represents $f(x, y+z)$.

    Parameters
    @@ -2572,7 +2572,7 @@
    [in] symbol An identifier (or name) for the returned symbolic function.

    Return an Expression representing a scalar symbolic function with the identifier specified by symbol. The function's symbolic dependencies are specified by the keys to the input arguments map; the values stored in the map are ignored.

    For example, if the symbol is the string "f", and the arguments to the function that is generated are the symbolic variable x and the symbolic expression y+z, then the generic symbolic function that is returned represents $f(x, y+z)$.

    Parameters
    @@ -2608,7 +2608,7 @@
    [in] symbol An identifier (or name) for the returned symbolic function.

    Returns
    The symbolic function or expression representing the result $\frac{\partial f}{\partial x}$.

    Definition at line 70 of file symengine_scalar_operations.cc.
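    A minimal sketch tying the functions documented here together (symbol creation, expression building, and scalar differentiation):

    #include <deal.II/differentiation/sd.h>

    namespace SD = dealii::Differentiation::SD;

    void scalar_differentiation_example()
    {
      const SD::Expression x = SD::make_symbol("x");
      const SD::Expression y = SD::make_symbol("y");

      // f(x, y) = x^2 sin(y)
      const SD::Expression f = x * x * SD::sin(y);

      // df/dx = 2 x sin(y), computed symbolically.
      const SD::Expression df_dx = SD::differentiate(f, x);
    }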

    @@ -3647,7 +3647,7 @@

    Return a substitution map that has any explicit interdependencies between the entries of the input substitution_map resolved.

    The force_cyclic_dependency_resolution flag exists to ensure, if desired, that no cyclic dependencies can exist in the returned map. If a cyclic dependency exists in the input substitution map, substitution_map, then with this flag set to true the dependency cycle is broken by a dictionary-ordered substitution. For example, if the substitution map contains two entries map["a"] -> "b" and map["b"] -> "a", then the result of calling this function would be a map with the elements map["a"] -> "a" and map["b"] -> "a".

    If one symbol is an explicit function of another, and it is desired that all their values are completely resolved, then it may be necessary to perform substitution a number of times before the result is finalized. This function performs substitution sweeps for a set of symbolic variables until all explicit relationships between the symbols in the map have been resolved. Whether each entry returns a symbolic or real value depends on the nature of the values stored in the substitution map. If the values associated with a key are also symbolic then the returned result may still be symbolic in nature. The terminal result of using the input substitution map, symbol_values, is then guaranteed to be rendered by a single substitution of the returned dependency-resolved map.

    Example: If map["a"] -> 1 and map["b"] -> "a" + 2, then the function $f(a,b(a)) = a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is determined upon the completion of the first sweep. A second sweep is therefore necessary to resolve the final symbol, and the returned value is ultimately $f = [3+a]_{a=1} = 4$. By resolving the explicit relationships between all symbols in the map, we determine that map["a"] -> 1 and map["b"] -> 1 + 2 = 3 and thus, using only one substitution, that $f = a+b = 1 + 3 = 4$.

    @@ -3707,11 +3707,11 @@

    If the symbols stored in the map are explicitly dependent on one another, then the returned result depends on the order in which the map is traversed. It is recommended to first resolve all interdependencies in the map using the resolve_explicit_dependencies() function.

    Examples:

    1. If map["a"] == 1 and map["b"] == "a" + 2, then the function $f(a,b(a)) := a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is returned. This is because the symbol "a" is substituted throughout the function first, and only then is the symbol "b(a)" substituted, by which time its explicit dependency on "a" cannot be resolved.

    2. If map["a"] == "b"+2 and map["b"] == 1, then the function $f(a(b),b) := a+b$ will be evaluated and the result $f\vert_{a=b+2, b} = [b+2+b]_{b=1} = 4$ is returned. This is because the explicitly dependent symbol "a(b)" is substituted first, followed by the symbol "b".
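    The first example above can be reproduced with a few calls; the following sketch (exact overloads assumed) shows how resolve_explicit_dependencies() turns the order-dependent substitution into a single-sweep one:

    #include <deal.II/differentiation/sd.h>

    namespace SD = dealii::Differentiation::SD;

    void substitution_example()
    {
      const SD::Expression a = SD::make_symbol("a");
      const SD::Expression b = SD::make_symbol("b");
      const SD::Expression f = a + b;

      // map["a"] -> 1, map["b"] -> a + 2: "b" depends explicitly on "a".
      SD::types::substitution_map substitution_map =
        SD::make_substitution_map(a, SD::Expression(1));
      SD::add_to_substitution_map(substitution_map, b, a + 2);

      // Resolve the interdependency so that one substitution suffices ...
      const SD::types::substitution_map resolved_map =
        SD::resolve_explicit_dependencies(substitution_map);

      // ... and evaluate: f = a + b = 1 + 3 = 4.
      const SD::Expression result = SD::substitute(f, resolved_map);
    }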
    @@ -3862,7 +3862,7 @@

    Return a vector of Expressions representing a vectorial symbolic variable with the identifier specified by symbol.

    For example, if the symbol is the string "v" then the vectorial symbolic variable that is returned represents the vector $v$. Each component of $v$ is prefixed by the given symbol, and has a suffix that indicates its component index.

    Template Parameters
    @@ -3899,7 +3899,7 @@
    dim The dimension of the returned tensor.

    Return a tensor of Expressions representing a tensorial symbolic variable with the identifier specified by symbol.

    For example, if the symbol is the string "T" then the tensorial symbolic variable that is returned represents the tensor $T$. Each component of $T$ is prefixed by the given symbol, and has a suffix that indicates its component indices.

    Template Parameters
    @@ -3937,7 +3937,7 @@
    rank The rank of the returned tensor.

    Return a symmetric tensor of Expressions representing a tensorial symbolic variable with the identifier specified by symbol.

    For example, if the symbol is the string "S" then the tensorial symbolic variable that is returned represents the tensor $S$. Each component of $S$ is prefixed by the given symbol, and has a suffix that indicates its component indices.

    Template Parameters
    @@ -4102,7 +4102,7 @@
    rank The rank of the returned tensor.

    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
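    For illustration, the three factory functions documented in this part of the page can be used as follows (a sketch; the dimension is chosen arbitrarily):

    #include <deal.II/base/symmetric_tensor.h>
    #include <deal.II/base/tensor.h>
    #include <deal.II/differentiation/sd.h>

    namespace SD = dealii::Differentiation::SD;
    using namespace dealii;

    void symbolic_tensors_example()
    {
      constexpr int dim = 3;

      // A vector of symbols v_0, ..., v_{dim-1}.
      const Tensor<1, dim, SD::Expression> v =
        SD::make_vector_of_symbols<dim>("v");

      // A rank-2 tensor of symbols T_ij and a symmetric counterpart S_ij.
      const Tensor<2, dim, SD::Expression> T =
        SD::make_tensor_of_symbols<2, dim>("T");
      const SymmetricTensor<2, dim, SD::Expression> S =
        SD::make_symmetric_tensor_of_symbols<2, dim>("S");
    }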
    @@ -4131,7 +4131,7 @@
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    @@ -4160,7 +4160,7 @@
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
    @@ -4189,7 +4189,7 @@
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    @@ -4218,7 +4218,7 @@
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    @@ -4247,7 +4247,7 @@
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    @@ -4276,7 +4276,7 @@
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    @@ -4305,7 +4305,7 @@
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    @@ -4334,8 +4334,8 @@
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}_{1}}{\partial \mathbf{T}_{2}}$.
    @@ -4364,8 +4364,8 @@
    Returns
    The symmetric tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{S}_{1}}{\partial \mathbf{S}_{2}}$.
    @@ -4394,7 +4394,7 @@
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}}{\partial \mathbf{S}}$.
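    The tensor-valued differentiate() overloads documented above can be exercised, for example, like this (a sketch; the scalar energy function is made up for illustration):

    #include <deal.II/base/symmetric_tensor.h>
    #include <deal.II/differentiation/sd.h>

    namespace SD = dealii::Differentiation::SD;
    using namespace dealii;

    void tensor_differentiation_example()
    {
      constexpr int dim = 3;

      const SymmetricTensor<2, dim, SD::Expression> S =
        SD::make_symmetric_tensor_of_symbols<2, dim>("S");

      // A scalar function of the tensor: psi(S) = 1/2 S : S.
      const SD::Expression psi = 0.5 * scalar_product(S, S);

      // dpsi/dS, returned as a symmetric tensor of expressions.
      const SymmetricTensor<2, dim, SD::Expression> dpsi_dS =
        SD::differentiate(psi, S);
    }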
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html 2024-01-30 03:04:49.300855155 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html 2024-01-30 03:04:49.300855155 +0000 @@ -228,13 +228,13 @@

    Using the constraint information usually leads to reductions in bandwidth of 10 or 20 per cent, but may for some very unstructured grids also lead to an increase. You have to weigh the decrease in your case with the time spent to use the constraint information, which usually is several times longer than the ‘pure’ renumbering algorithm.

    In almost all cases, the renumbering scheme finds a corner to start with. Since there is more than one corner in most grids, and since even an interior degree of freedom may be a better starting point, letting the user specify the starting point may be a viable approach if you have a simple scheme to derive a suitable point (e.g. by successively taking the third child of the cell at the top left of the coarsest level, taking its third vertex and the dof index thereof, if you want the top left corner vertex). If you do not know beforehand what your grid will look like (e.g. when using adaptive algorithms), however, searching for the best starting point may be difficult, and in many cases will not justify the effort.

    Component-wise and block-wise numberings

    For finite elements composed of several base elements using the FESystem class, or for elements which provide several components themselves, it may be of interest to sort the DoF indices by component. This will then bring out the block matrix structure, since otherwise the degrees of freedom are numbered cell-wise without taking into account that they may belong to different components. For example, one may want to sort degrees of freedom for a Stokes discretization so that we first get all velocities and then all the pressures, so that the resulting matrix naturally decomposes into a $2\times 2$ system.

    This kind of numbering may be obtained by calling the component_wise() function of this class. Since it does not touch the order of indices within each component, it may be worthwhile to first renumber using the Cuthill-McKee or a similar algorithm and afterwards renumbering component-wise. This will bring out the matrix structure and additionally have a good numbering within each block.

    The component_wise() function not only honors enumeration based on vector components, but also allows grouping vector components together into "blocks" using a defaulted argument to the various DoFRenumbering::component_wise() functions (see GlossComponent vs GlossBlock for a description of the difference). The blocks designated through this argument may, but do not have to be, equal to the blocks that the finite element reports. For example, a typical Stokes element would be

    FESystem<dim> stokes_fe (FE_Q<dim>(2), dim, // dim velocities
                             FE_Q<dim>(1), 1); // one pressure
    This element has dim+1 vector components and equally many blocks. However, one may want to consider the velocities as one logical block so that all velocity degrees of freedom are enumerated the same way, independent of whether they are $x$- or $y$-velocities. This is done, for example, in step-20 and step-22 as well as several other tutorial programs.

    On the other hand, if you really want to use block structure reported by the finite element itself (a case that is often the case if you have finite elements that have multiple vector components, e.g. the FE_RaviartThomas or FE_Nedelec elements) then you can use the DoFRenumbering::block_wise instead of the DoFRenumbering::component_wise functions.
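    In code, the combination recommended above (bandwidth reduction first, then component-wise sorting) is only two calls; a minimal sketch:

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/dofs/dof_renumbering.h>

    using namespace dealii;

    template <int dim>
    void renumber(DoFHandler<dim> &dof_handler)
    {
      // First reduce the bandwidth within what will become each block ...
      DoFRenumbering::Cuthill_McKee(dof_handler);

      // ... then sort by vector component to bring out the block structure.
      DoFRenumbering::component_wise(dof_handler);
    }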

    Cell-wise numbering

    Given an ordered vector of cells, the function cell_wise() sorts the degrees of freedom such that degrees on earlier cells of this vector will occur before degrees on later cells.

    @@ -247,7 +247,7 @@

    The MatrixFree class provides optimized algorithms for interleaving operations on vectors before and after the access of the vector data in the respective loops. The algorithm matrix_free_data_locality() makes sure that all unknowns with a short distance between the first and last access are grouped together, in order to increase the spatial data locality.

    A comparison of reordering strategies

    As a benchmark of comparison, let us consider the sparsity patterns produced by the various algorithms when using the $Q_2^d\times Q_1$ element combination typically employed in the discretization of Stokes equations, when used on the mesh obtained in step-22 after one adaptive mesh refinement in 3d. The space dimension together with the coupled finite element leads to a rather dense system matrix with, on average, around 180 nonzero entries per row. After applying each of the reordering strategies shown below, the degrees of freedom are also sorted using DoFRenumbering::component_wise into velocity and pressure groups; this produces the $2\times 2$ block structure seen below with the large velocity-velocity block at top left, small pressure-pressure block at bottom right, and coupling blocks at top right and bottom left.

    The goal of reordering strategies is to improve the preconditioner. In step-22 we use a SparseILU as preconditioner for the velocity-velocity block at the top left. The quality of the preconditioner can then be measured by the number of CG iterations required to solve a linear system with this block. For some of the reordering strategies below we record this number for adaptive refinement cycle 3, with 93176 degrees of freedom; because we solve several linear systems with the same matrix in the Schur complement, the average number of iterations is reported. The lower the number, the better the preconditioner, and consequently the better the renumbering of degrees of freedom is suited for this task. We also state the run-time of the program, in part determined by the number of iterations needed, for the first 4 cycles on one of our machines. Note that the reported times correspond to the run time of the entire program, not just the affected solver; if a program runs twice as fast with one particular ordering as with another, then this means that the actual solver is several times faster.

    @@ -459,7 +459,7 @@
    Sort the degrees of freedom by vector component. The numbering within each component is not touched, so a degree of freedom with index $i$, belonging to some component, and another degree of freedom with index $j$ belonging to the same component will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    You can specify that the components are ordered in a different way than suggested by the FESystem object you use. To this end, set up the vector target_component such that the entry at index i denotes the number of the target component for dofs with component i in the FESystem. Naming the same target component more than once is possible and results in a blocking of several components into one. This is discussed in step-22. If you omit this argument, the same order as given by the finite element is used.

    If one of the base finite elements from which the global finite element under consideration here is constructed is non-primitive, i.e., its shape functions have more than one non-zero component, then it is not possible to associate these degrees of freedom with a single vector component. In this case, they are associated with the first vector component to which they belong.

    For finite elements with only one component, or a single non-primitive base element, this function is the identity operation.
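    A sketch of the grouping described above for a Stokes element with dim velocities and one pressure (blocking as in step-22):

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/dofs/dof_renumbering.h>

    #include <vector>

    using namespace dealii;

    template <int dim>
    void renumber_stokes(DoFHandler<dim> &dof_handler)
    {
      // All dim velocity components map to target component 0, the
      // pressure to target component 1.
      std::vector<unsigned int> target_component(dim + 1, 0);
      target_component[dim] = 1;
      DoFRenumbering::component_wise(dof_handler, target_component);
    }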

    @@ -553,7 +553,7 @@
    Sort the degrees of freedom by vector block. The numbering within each block is not touched, so a degree of freedom with index $i$, belonging to some block, and another degree of freedom with index $j$ belonging to the same block will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    Note
    This function only succeeds if each of the elements in the hp::FECollection attached to the DoFHandler argument has exactly the same number of blocks (see the glossary for more information). Note that this is not always given: while the hp::FECollection class ensures that all of its elements have the same number of vector components, they need not have the same number of blocks. At the same time, this function here needs to match individual blocks across elements and therefore requires that elements have the same number of blocks and that subsequent blocks in one element have the same meaning as in another element.

    Definition at line 999 of file dof_renumbering.cc.

    @@ -638,7 +638,7 @@
  • For meshes based on parallel::distributed::Triangulation, the locally owned cells of each MPI process are contiguous in Z order. That means that numbering degrees of freedom by visiting cells in Z order yields locally owned DoF indices that consist of contiguous ranges for each process. This is also true for the default ordering of DoFs on such triangulations, but the default ordering creates an enumeration that also depends on how many processors participate in the mesh, whereas the one generated by this function enumerates the degrees of freedom on a particular cell with indices that will be the same regardless of how many processes the mesh is split up between.
  • For meshes based on parallel::shared::Triangulation, the situation is more complex. Here, the set of locally owned cells is determined by a partitioning algorithm (selected by passing an object of type parallel::shared::Triangulation::Settings to the constructor of the triangulation), and in general these partitioning algorithms may assign cells to subdomains based on decisions that may have nothing to do with the Z order. (Though it is possible to select these flags in a way so that partitioning uses the Z order.) As a consequence, the cells of one subdomain are not contiguous in Z order, and if one renumbered degrees of freedom based on the Z order of cells, one would generally end up with DoF indices that on each processor do not form a contiguous range. This is often inconvenient (for example, because PETSc cannot store vectors and matrices for which the locally owned set of indices is not contiguous), and consequently this function uses the following algorithm for parallel::shared::Triangulation objects:

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 2024-01-30 03:04:49.380855822 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 2024-01-30 03:04:49.380855822 +0000 @@ -308,7 +308,7 @@

    (As a side note, for corner cases: The question what a degree of freedom on the boundary is, is not so easy. It should really be a degree of freedom of which the respective basis function has nonzero values on the boundary. At least for Lagrange elements this definition is equal to the statement that the off-point, or what deal.II calls support_point, of the shape function, i.e. the point where the function assumes its nominal value (for Lagrange elements this is the point where it has the function value 1), is located on the boundary. We do not check this directly, the criterion is rather defined through the information the finite element class gives: the FiniteElement class defines the numbers of basis functions per vertex, per line, and so on and the basis functions are numbered after this information; a basis function is to be considered to be on the face of a cell (and thus on the boundary if the cell is at the boundary) according to it belonging to a vertex, line, etc but not to the interior of the cell. The finite element uses the same cell-wise numbering so that we can say that if a degree of freedom was numbered as one of the dofs on lines, we assume that it is located on the line. Where the off-point actually is, is a secret of the finite element (well, you can ask it, but we don't do it here) and not relevant in this context.)

    Setting up sparsity patterns for boundary matrices

    In some cases, one wants to only work with DoFs that sit on the boundary. One application is, for example, if rather than interpolating non-homogeneous boundary values, one would like to project them. For this, we need two things: a way to identify nodes that are located on (parts of) the boundary, and a way to build matrices out of only degrees of freedom that are on the boundary (i.e. much smaller matrices, in which we do not even build the large zero block that stems from the fact that most degrees of freedom have no support on the boundary of the domain). The first of these tasks is done by the map_dof_to_boundary_indices() function (described above).

    The second part requires us first to build a sparsity pattern for the couplings between boundary nodes, and then to actually build the components of this matrix. While actually computing the entries of these small boundary matrices is discussed in the MatrixCreator namespace, the creation of the sparsity pattern is done by the create_boundary_sparsity_pattern() function. For its work, it needs to have a numbering of all those degrees of freedom that are on those parts of the boundary that we are interested in. You can get this from the map_dof_to_boundary_indices() function. It then builds the sparsity pattern corresponding to integrals like $\int_\Gamma \varphi_{b2d(i)} \varphi_{b2d(j)} dx$, where $i$ and $j$ are indices into the matrix, and $b2d(i)$ is the global DoF number of a degree of freedom sitting on a boundary (i.e., $b2d$ is the inverse of the mapping returned by map_dof_to_boundary_indices() function).
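    As a sketch of the two steps just described (note: in current deal.II the sparsity pattern function is spelled DoFTools::make_boundary_sparsity_pattern; the exact overload used here is an assumption):

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/dofs/dof_tools.h>
    #include <deal.II/lac/dynamic_sparsity_pattern.h>

    #include <vector>

    using namespace dealii;

    template <int dim>
    void boundary_sparsity(const DoFHandler<dim> &dof_handler)
    {
      // Step 1: enumerate the DoFs that live on the boundary.
      std::vector<types::global_dof_index> dof_to_boundary_mapping;
      DoFTools::map_dof_to_boundary_indices(dof_handler,
                                            dof_to_boundary_mapping);

      // Step 2: build the much smaller sparsity pattern that couples
      // only boundary DoFs.
      DynamicSparsityPattern dsp(dof_handler.n_boundary_dofs());
      DoFTools::make_boundary_sparsity_pattern(dof_handler,
                                               dof_to_boundary_mapping,
                                               dsp);
    }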

    Enumeration Type Documentation

    ◆ Coupling

    @@ -488,7 +488,7 @@

    Otherwise, if face_1 and face_2 are not active faces, this function loops recursively over the children of face_1 and face_2. If only one of the two faces is active, then we recursively iterate over the children of the non-active ones and make sure that the solution function on the refined side equals that on the non-refined face in much the same way as we enforce hanging node constraints at places where differently refined cells come together. (However, unlike hanging nodes, we do not enforce the requirement that there be only a difference of one refinement level between the two sides of the domain you would like to be periodic).

    This routine only constrains DoFs that are not already constrained. If this routine encounters a DoF that already is constrained (for instance by Dirichlet boundary conditions), the old setting of the constraint (dofs the entry is constrained to, inhomogeneities) is kept and nothing happens.

    The flags in the component_mask (see GlossComponentMask) denote which components of the finite element space shall be constrained with periodic boundary conditions. If it is left as specified by the default value all components are constrained. If it is different from the default value, it is assumed that the number of entries equals the number of components of the finite element. This can be used to enforce periodicity in only one variable in a system of equations.

    face_orientation, face_flip and face_rotation describe an orientation that should be applied to face_1 prior to matching and constraining DoFs. This has nothing to do with the actual orientation of the given faces in their respective cells (which for boundary faces is always the default) but instead how you want to see periodicity to be enforced. For example, by using these flags, you can enforce a condition of the kind $u(0,y)=u(1,1-y)$ (i.e., a Moebius band) or in 3d a twisted torus. More precisely, these flags match local face DoF indices in the following manner:

    In 2d: face_orientation must always be true, face_rotation is always false, and face_flip has the meaning of line_flip; this implies e.g. for Q1:

    face_orientation = true, face_flip = false, face_rotation = false:
    @@ -561,7 +561,7 @@
    and any combination of that...

    Optionally, a matrix matrix along with a std::vector first_vector_components can be specified that describes how DoFs on face_1 should be modified prior to constraining to the DoFs of face_2. Here, two interpretations are possible: If the std::vector first_vector_components is non-empty, the matrix is interpreted as a dim $\times$ dim rotation matrix that is applied to all vector-valued blocks listed in first_vector_components of the FESystem. If first_vector_components is empty, the matrix is interpreted as an interpolation matrix with size no_face_dofs $\times$ no_face_dofs.

    This function makes sure that identity constraints don't create cycles in constraints.

    periodicity_factor can be used to implement Bloch periodic conditions (a.k.a. phase shift periodic conditions) of the form $\psi(\mathbf{r})=e^{-i\mathbf{k}\cdot\mathbf{r}}u(\mathbf{r})$ where $u$ is periodic with the same periodicity as the crystal lattice and $\mathbf{k}$ is the wavevector, see https://en.wikipedia.org/wiki/Bloch_wave. The solution at face_2 is equal to the solution at face_1 times periodicity_factor. For example, if the solution at face_1 is $\psi(0)$ and $\mathbf{d}$ is the corresponding point on face_2, then the solution at face_2 should be $\psi(d) = \psi(0)e^{-i \mathbf{k}\cdot \mathbf{d}}$. This condition can be implemented using $\mathrm{periodicity\_factor}=e^{-i \mathbf{k}\cdot \mathbf{d}}$.

    Detailed information can be found in the Glossary entry on periodic boundary conditions.

    Definition at line 2292 of file dof_tools_constraints.cc.
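    The simplest call is a sketch like the following, for a mesh whose opposite faces carry boundary ids 0 and 1 and match up to translation in the x-direction (the boundary ids are assumptions):

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/dofs/dof_tools.h>
    #include <deal.II/lac/affine_constraints.h>

    using namespace dealii;

    template <int dim>
    void periodic_constraints(const DoFHandler<dim>     &dof_handler,
                              AffineConstraints<double> &constraints)
    {
      // Couple boundary id 0 (face_1) with boundary id 1 (face_2),
      // periodic in the x-direction (direction = 0).
      DoFTools::make_periodicity_constraints(dof_handler,
                                             /*b_id1=*/0,
                                             /*b_id2=*/1,
                                             /*direction=*/0,
                                             constraints);
    }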

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 2024-01-30 03:04:49.396855955 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 2024-01-30 03:04:49.400855988 +0000 @@ -165,7 +165,7 @@
    Linear regression least-square fit of $y = k \, x + b$. The size of the input vectors should be equal and more than 1. The returned pair will contain $k$ (first) and $b$ (second).

    Definition at line 30 of file fe_series.cc.
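    A self-contained sketch: fitting three collinear points should recover the slope and intercept exactly:

    #include <deal.II/fe/fe_series.h>

    #include <iostream>
    #include <utility>
    #include <vector>

    int main()
    {
      // Fit y = k x + b to points on the line y = 2x + 1.
      const std::vector<double> x = {0., 1., 2.};
      const std::vector<double> y = {1., 3., 5.};

      const std::pair<double, double> fit =
        dealii::FESeries::linear_regression(x, y);

      std::cout << "k = " << fit.first << ", b = " << fit.second << '\n';
    }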

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 2024-01-30 03:04:49.440856322 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 2024-01-30 03:04:49.440856322 +0000 @@ -377,22 +377,22 @@

    This is a rather specialized function used during the construction of finite element objects. It is used to build the basis of shape functions for an element, given a set of polynomials and interpolation points. The function is only implemented for finite elements with exactly dim vector components. In particular, this applies to classes derived from the FE_PolyTensor class.

    Specifically, the purpose of this function is as follows: FE_PolyTensor receives, from its derived classes, an argument that describes a polynomial space. This space may be parameterized in terms of monomials, or in some other way, but is in general not in the form that we use for finite elements where we typically want to use a basis that is derived from some kind of node functional (e.g., the interpolation at specific points). Concretely, assume that the basis used by the polynomial space is $\{\tilde\varphi_j(\mathbf x)\}_{j=1}^N$, and that the node functionals of the finite element are $\{\Psi_i\}_{i=1}^N$. We then want to compute a basis $\{\varphi_j(\mathbf x)\}_{j=1}^N$ for the finite element space so that $\Psi_i[\varphi_j] = \delta_{ij}$. To do this, we can set $\varphi_j(\mathbf x) = \sum_{k=1}^N c_{jk} \tilde\varphi_k(\mathbf x)$ where we need to determine the expansion coefficients $c_{jk}$. We do this by applying $\Psi_i$ to both sides of the equation, to obtain

\begin{align*}
   \Psi_i [\varphi_j] = \sum_{k=1}^N c_{jk} \Psi_i[\tilde\varphi_k],
\end{align*}

    and we know that the left hand side equals $\delta_{ij}$. If you think of this as a system of $N\times N$ equations for the elements of a matrix on the left and on the right, then this can be written as

\begin{align*}
   I = C X^T
\end{align*}

    where $C$ is the matrix of coefficients $c_{jk}$ and $X_{ik} = \Psi_i[\tilde\varphi_k]$. Consequently, in order to compute the expansion coefficients $C=X^{-T}$, we need to apply the node functionals to all functions of the "raw" basis of the polynomial space.

    Until the finite element receives this matrix $X$ back, it describes its shape functions (e.g., in FiniteElement::shape_value()) in the form $\tilde\varphi_j$. After it calls this function, it has the expansion coefficients and can describe its shape functions as $\varphi_j$.

    This function therefore computes this matrix $X$, for the following specific circumstances:

    @@ -898,7 +898,7 @@

    Compute $(Id-I_h)z_1$ for a given dof1-function $z_1$, where $I_h$ is the interpolation from fe1 to fe2. The result $(Id-I_h)z_1$ is written into z1_difference.

    Note that this function does not work for continuous elements at hanging nodes. For that case, use the interpolation_difference function below that takes an additional AffineConstraints object.

    @@ -940,7 +940,7 @@

    Compute $(Id-I_h)z_1$ for a given dof1-function $z_1$, where $I_h$ is the interpolation from fe1 to fe2. The result $(Id-I_h)z_1$ is written into z1_difference. constraints1 and constraints2 are the hanging node constraints corresponding to dof1 and dof2, respectively. These objects are particularly important when continuous elements on grids with hanging nodes (locally refined grids) are involved.

    For parallel computations, supply z1 with ghost elements and z1_difference without ghost elements.
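    A sketch of the constrained variant just described (all objects are assumed to be set up and compatible):

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_tools.h>
    #include <deal.II/lac/affine_constraints.h>
    #include <deal.II/lac/vector.h>

    using namespace dealii;

    template <int dim>
    void interpolation_error(const DoFHandler<dim>           &dof1,
                             const AffineConstraints<double> &constraints1,
                             const Vector<double>            &z1,
                             const DoFHandler<dim>           &dof2,
                             const AffineConstraints<double> &constraints2,
                             Vector<double>                  &z1_difference)
    {
      // z1_difference = (Id - I_h) z1, with hanging-node constraints
      // applied on both spaces.
      FETools::interpolation_difference(
        dof1, constraints1, z1, dof2, constraints2, z1_difference);
    }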

    @@ -1011,7 +1011,7 @@
  • It then performs a loop over all non-active cells of dof2. If such a non-active cell has at least one active child, then we call the children of this cell a "patch". We then interpolate from the children of this patch to the patch, using the finite element space associated with dof2 and immediately interpolate back to the children. In essence, this information throws away all information in the solution vector that lives on a scale smaller than the patch cell.
  • Since we traverse non-active cells from the coarsest to the finest levels, we may find patches that correspond to child cells of previously treated patches if the mesh had been refined adaptively (this cannot happen if the mesh has been refined globally because there the children of a patch are all active). We also perform the operation described above on these patches, but it is easy to see that on patches that are children of previously treated patches, the operation is now the identity operation (since it interpolates from the children of the current patch a function that had previously been interpolated to these children from an even coarser patch). Consequently, this does not alter the solution vector any more.
    The name of the function originates from the fact that it can be used to construct a representation of a function of higher polynomial degree on a once coarser mesh. For example, if you imagine that you start with a $Q_1$ function on a globally refined mesh, and that dof2 is associated with a $Q_2$ element, then this function computes the equivalent of the operator $I_{2h}^{(2)}$ interpolating the original piecewise linear function onto a quadratic function on a once coarser mesh with mesh size $2h$ (but representing this function on the original mesh with size $h$). If the exact solution is sufficiently smooth, then $u^\ast=I_{2h}^{(2)}u_h$ is typically a better approximation to the exact solution $u$ of the PDE than $u_h$ is. In other words, this function provides a postprocessing step that improves the solution in a similar way one often obtains by extrapolating a sequence of solutions, explaining the origin of the function's name.

    Note
    The resulting field does not satisfy continuity requirements of the given finite elements if the algorithm outlined above is used. When you use continuous elements on grids with hanging nodes, please use the extrapolate function with an additional AffineConstraints argument, see below.
    Since this function operates on patches of cells, it requires that the underlying grid is refined at least once for every coarse grid cell. If this is not the case, an exception will be raised.
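    A sketch of the postprocessing step described above, using the constrained overload recommended in the note (dof1 and dof2 are assumed to hold, e.g., Q1 and Q2 spaces on the same mesh):

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_tools.h>
    #include <deal.II/lac/affine_constraints.h>
    #include <deal.II/lac/vector.h>

    using namespace dealii;

    template <int dim>
    void postprocess(const DoFHandler<dim>           &dof1, // e.g. Q1
                     const Vector<double>            &u_h,
                     const DoFHandler<dim>           &dof2, // e.g. Q2
                     const AffineConstraints<double> &constraints2,
                     Vector<double>                  &u_star)
    {
      // u_star approximates I_{2h}^{(2)} u_h; constraints2 keeps the
      // result conforming on adaptively refined grids.
      FETools::extrapolate(dof1, u_h, dof2, constraints2, u_star);
    }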
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html 2024-01-30 03:04:49.468856555 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html 2024-01-30 03:04:49.468856555 +0000 @@ -127,13 +127,13 @@
    1. Tensor product construction (do_tensor_product=true): The tensor product construction, in the simplest case, builds a vector-valued element from scalar elements (see this documentation module and this glossary entry for more information). To give an example, consider creating a vector-valued element with two vector components, where the first should have linear shape functions and the second quadratic shape functions. In 1d, the shape functions (on the reference cell) of the base elements are then

\begin{align*}
   Q_1 &= \{ 1-x, x \},
   \\  Q_2 &= \{ 2(\frac 12 - x)(1-x), 2(x - \frac 12)x, 4x(1-x) \},
\end{align*}

      where shape functions are ordered in the usual way (first on the first vertex, then on the second vertex, then in the interior of the cell). The tensor product construction will create an element with the following shape functions:

\begin{align*}
   Q_1 \times Q_2 &=
   \left\{
     \begin{pmatrix} 1-x \\ 0 \end{pmatrix},
     \begin{pmatrix} x \\ 0 \end{pmatrix},
     \begin{pmatrix} 0 \\ 2(\frac 12 - x)(1-x) \end{pmatrix},
     \begin{pmatrix} 0 \\ 2(x - \frac 12)x \end{pmatrix},
     \begin{pmatrix} 0 \\ 4x(1-x) \end{pmatrix}
   \right\}.
\end{align*}

      The list here is again in standard order.

      Of course, the procedure also works if the base elements are already vector valued themselves: in that case, the composed element simply has as many vector components as the base elements taken together.

      @@ -150,10 +150,10 @@
    2. Combining shape functions (do_tensor_product=false): In contrast to the previous strategy, combining shape functions simply takes all of the shape functions together. In the case above, this would yield the following element:

\begin{align*}
   Q_1 + Q_2 &= \{ 1-x, 2(\frac 12 - x)(1-x),
                   x, 2(x - \frac 12)x, 4x(1-x) \}.
\end{align*}

      In other words, if the base elements are scalar, the resulting element will also be. In general, the base elements will all have to have the same number of vector components.

      The element constructed above of course no longer has a linearly independent set of shape functions. As a consequence, any matrix one creates by treating all shape functions of the composed element in the same way will be singular. In practice, this strategy is therefore typically used in situations where one explicitly makes sure that certain shape functions are treated differently (e.g., by multiplying them with weight functions), or in cases where the shape functions one combines are not linearly dependent.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 2024-01-30 03:04:49.484856688 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 2024-01-30 03:04:49.484856688 +0000 @@ -130,13 +130,13 @@

      Estimate bounds on the value and bounds on each gradient component of a Function, $f$, over a BoundingBox, by approximating it by a 2nd order Taylor polynomial starting from the box center.

      Each lower and upper bound is returned as a std::pair<double, double>, such that the first entry is the lower bound, $L$, and the second is the upper bound, $U$, i.e. $f(x) \in [L, U]$.

      The function value, gradient, and Hessian are computed at the box center. The bounds on the value of the function are then estimated as

      $f(x) \in [f(x_c) - F, f(x_c) + F]$, where $F = \sum_i |\partial_i f(x_c)| h_i + 1/2 \sum_i \sum_j |\partial_i \partial_j f(x_c)| h_i h_j$.

      Here, $h_i$ is half the side length of the box in the $i$th coordinate direction, which is the distance we extrapolate. The bounds on the gradient components are estimated similarly as

      $\partial_i f \in [\partial_i f(x_c) - G_i, \partial_i f(x_c) + G_i]$, where $G_i = \sum_j |\partial_i \partial_j f(x_c)| h_j$.

      If the function has more than one component, the component parameter can be used to specify which function component the bounds should be computed for.

      Definition at line 26 of file function_tools.cc.
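      A self-contained sketch (the cosine function and the box are arbitrary choices for illustration):

      #include <deal.II/base/bounding_box.h>
      #include <deal.II/base/function_lib.h>
      #include <deal.II/base/point.h>
      #include <deal.II/numerics/function_tools.h>

      #include <array>
      #include <utility>

      using namespace dealii;

      void bounds_example()
      {
        // A smooth example function from the deal.II function library.
        const Functions::CosineFunction<2> f;

        // The box [0, 0.1] x [0, 0.1].
        const BoundingBox<2> box({Point<2>(0., 0.), Point<2>(0.1, 0.1)});

        std::pair<double, double>                value_bounds;
        std::array<std::pair<double, double>, 2> gradient_bounds;
        FunctionTools::taylor_estimate_function_bounds<2>(f,
                                                          box,
                                                          value_bounds,
                                                          gradient_bounds);
      }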

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGeometricUtilities_1_1Coordinates.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGeometricUtilities_1_1Coordinates.html 2024-01-30 03:04:49.500856822 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGeometricUtilities_1_1Coordinates.html 2024-01-30 03:04:49.500856822 +0000 @@ -130,13 +130,13 @@
      Return spherical coordinates of a Cartesian point point. The returned array is filled with radius, azimuth angle $\in [0,2 \pi)$ and polar/inclination angle $ \in [0,\pi]$ (omitted in 2d).

      In 3d the transformation is given by

\begin{align*}
  r &= \sqrt{x^2+y^2+z^2} \\
  \theta &= {\rm atan}(y/x) \\
  \phi &= {\rm acos} (z/r)
\end{align*}

      The use of this function is demonstrated in step-75.

      @@ -160,13 +160,13 @@
      Return the Cartesian coordinates of a spherical point defined by scoord which is filled with radius $r \in [0,\infty)$, azimuth angle $\theta \in [0,2 \pi)$ and polar/inclination angle $\phi \in [0,\pi]$ (omitted in 2d).

      In 3d the transformation is given by

\begin{align*}
  x &= r\, \cos(\theta) \, \sin(\phi) \\
  y &= r\, \sin(\theta) \, \sin(\phi) \\
  z &= r\, \cos(\phi)
\end{align*}

      Definition at line 77 of file geometric_utilities.cc.
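      A round-trip sketch of the two conversions documented here:

      #include <deal.II/base/geometric_utilities.h>
      #include <deal.II/base/point.h>

      #include <array>

      using namespace dealii;

      void spherical_roundtrip()
      {
        const Point<3> p(1., 1., 1.);

        // {radius, azimuth in [0, 2 pi), polar angle in [0, pi]}.
        const std::array<double, 3> scoord =
          GeometricUtilities::Coordinates::to_spherical(p);

        // Back to Cartesian coordinates; q equals p up to rounding.
        const Point<3> q =
          GeometricUtilities::Coordinates::from_spherical<3>(scoord);
      }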

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html 2024-01-30 03:04:49.516856955 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html 2024-01-30 03:04:49.516856955 +0000 @@ -135,9 +135,9 @@

      Create a partitioning of the given range of iterators so that iterators that point to conflicting objects will be placed into different partitions, where the question whether two objects conflict is determined by a user-provided function.

      This function can also be considered as a graph coloring: each object pointed to by an iterator is considered to be a node and there is an edge between each two nodes that conflict. The graph coloring algorithm then assigns a color to each node in such a way that two nodes connected by an edge do not have the same color.

      A typical use case for this function is in assembling a matrix in parallel. There, one would like to assemble local contributions on different cells at the same time (an operation that is purely local and so requires no synchronization) but then we need to add these local contributions to the global matrix. In general, the contributions from different cells may be to the same matrix entries if the cells share degrees of freedom and, consequently, can not happen at the same time unless we want to risk a race condition (see http://en.wikipedia.org/wiki/Race_condition). Thus, we call these two cells in conflict, and we can only allow operations in parallel from cells that do not conflict. In other words, two cells are in conflict if the set of matrix entries (for example characterized by the rows) have a nonempty intersection.

      In this generality, computing the graph of conflicts would require calling a function that determines whether two iterators (or the two objects they represent) conflict, and calling it for every pair of iterators, i.e., $\frac 12 N (N-1)$ times. This is too expensive in general. A better approach is to require a user-defined function that returns for every iterator it is called for a set of indicators of some kind that characterize a conflict; two iterators are in conflict if their conflict indicator sets have a nonempty intersection. In the example of assembling a matrix, the conflict indicator set would contain the indices of all degrees of freedom on the cell pointed to (in the case of continuous Galerkin methods) or the union of indices of degree of freedom on the current cell and all cells adjacent to the faces of the current cell (in the case of discontinuous Galerkin methods, because there one computes face integrals coupling the degrees of freedom connected by a common face – see step-12).

      Note
      The conflict set returned by the user defined function passed as third argument needs to accurately describe all degrees of freedom for which anything is written into the matrix or right hand side. In other words, if the writing happens through a function like AffineConstraints::copy_local_to_global(), then the set of conflict indices must actually contain not only the degrees of freedom on the current cell, but also those they are linked to by constraints such as hanging nodes.
      In other situations, the conflict indicator sets may represent something different altogether – it is up to the caller of this function to describe what it means for two iterators to conflict. Given this, computing conflict graph edges can be done significantly more cheaply than with ${\cal O}(N^2)$ operations.

      In any case, the result of the function will be so that iterators whose conflict indicator sets have overlap will not be assigned to the same color.

      Note
      The algorithm used in this function is described in a paper by Turcksin, Kronbichler and Bangerth, see workstream_paper.
      Parameters
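
      For illustration, a hedged sketch of the parallel-assembly use case described above (the dof_handler object and the conflict lambda are assumptions, not part of this documentation):

      #include <deal.II/base/graph_coloring.h>

      using CellIterator = typename DoFHandler<2>::active_cell_iterator;
      const std::vector<std::vector<CellIterator>> coloring =
        GraphColoring::make_graph_coloring(
          dof_handler.begin_active(),
          dof_handler.end(),
          [](const CellIterator &cell) {
            // Conflict indicator set: the global DoF indices on this cell.
            std::vector<types::global_dof_index> indices(
              cell->get_fe().n_dofs_per_cell());
            cell->get_dof_indices(indices);
            return indices;
          });
      // Cells within one entry of 'coloring' share no DoFs and can therefore
      // be assembled concurrently without risking a race condition.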
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 2024-01-30 03:04:49.596857621 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 2024-01-30 03:04:49.600857655 +0000 @@ -295,7 +295,7 @@ const bool colorize = false

      Initialize the given triangulation with a hypercube (line in 1d, square in 2d, etc) consisting of exactly one cell. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

      If the argument colorize is false, then all boundary indicators are set to zero (the default boundary indicator) for 2d and 3d. If it is true, the boundary is colorized as in hyper_rectangle(). In 1d the indicators are always colorized, see hyper_rectangle().
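
      For illustration, a minimal sketch (assuming this block documents GridGenerator::hyper_cube):

      Triangulation<2> tria;
      GridGenerator::hyper_cube(tria, 0., 1., /*colorize=*/true);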

      @@ -323,7 +323,7 @@ const std::vector< Point< dim > > & vertices

      Create a $d$-simplex (i.e., a triangle in 2d, or a tetrahedron in 3d) with $d+1$ corners. Since deal.II does not support triangular and tetrahedral cells, the simplex described by the input arguments is subdivided into quadrilaterals and hexahedra by adding edge, face, and simplex midpoints, resulting in a mesh that consists of $d+1$ quadrilateral or hexahedral cells.

      The vertices argument contains a vector with all d+1 vertices defining the corners of the simplex. They must be given in an order such that the vectors from the first vertex to each of the others form a right-handed system.

      The meshes generated in two and three dimensions are:
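
      For illustration, a minimal sketch (assuming this block documents GridGenerator::simplex; the corners form a right-handed system as required above):

      Triangulation<2> tria;
      GridGenerator::simplex(tria,
                             {Point<2>(0., 0.), Point<2>(1., 0.), Point<2>(0., 1.)});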

      @@ -728,7 +728,7 @@ const bool colorize = false

      Generate a grid consisting of a channel with a cylinder. This is a common benchmark for Navier-Stokes solvers. The geometry consists of a channel of size $[0, 2.2] \times [0, 0.41] \times [0, 0.41] $ (where the $z$ dimension is omitted in 2d) with a cylinder, parallel to the $z$ axis with diameter $0.1$, centered at $(0.2, 0.2, 0)$. The channel has three distinct regions:

        1. If n_shells is greater than zero, then there are that many shells centered around the cylinder,
        2. @@ -752,10 +752,10 @@
          Parameters
          tria: Triangulation to be created. Must be empty upon calling this function.
          shell_region_width: Width of the layer of shells around the cylinder. This value should be between $0$ and $0.05$; the default value is $0.03$.
          n_shells: Number of shells to use in the shell layer.
          skewness: Parameter controlling how close the shells are to the cylinder: see the mathematical definition given in GridGenerator::concentric_hyper_shells.
          colorize: If true, then assign different boundary ids to different parts of the boundary. For more information on boundary indicators see this glossary entry. The left boundary (at $x = 0$) is assigned an id of $0$, the right boundary (at $x = 2.2$) is assigned an id of $1$; the boundary of the obstacle in the middle (i.e., the circle in 2d or the cylinder walls in 3d) is assigned an id of $2$, and the channel walls are assigned an id of $3$.
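
          For illustration, a minimal sketch (assuming this block documents GridGenerator::channel_with_cylinder; the values restate the defaults given above):

          Triangulation<2> tria;
          GridGenerator::channel_with_cylinder(tria,
                                               /*shell_region_width=*/0.03,
                                               /*n_shells=*/2,
                                               /*skewness=*/2.0,
                                               /*colorize=*/true);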
          @@ -1307,7 +1307,7 @@ const double half_length = 1.

          Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius.

          The boundaries are colored according to the following scheme: 0 for the hull of the cylinder, 1 for the left hand face and 2 for the right hand face (see the glossary entry on colorization).

          The manifold id for the hull of the cylinder is set to zero, and a CylindricalManifold is attached to it.

          Precondition
          The triangulation passed as argument needs to be empty when calling this function.
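
          For illustration, a minimal sketch (assuming this block documents GridGenerator::cylinder):

          Triangulation<3> tria;
          GridGenerator::cylinder(tria, /*radius=*/1., /*half_length=*/1.);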
          @@ -1341,7 +1341,7 @@ const double half_length = 1.

          Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius. This function is only implemented for dim==3.

          The boundaries are colored according to the following scheme: 0 for the hull of the cylinder, 1 for the left hand face and 2 for the right hand face (see the glossary entry on colorization).

          The manifold id for the hull of the cylinder is set to zero, and a CylindricalManifold is attached to it.

          @@ -1539,7 +1539,7 @@
          Parameters
          tria: A Triangulation object which has to be empty.
          sizes: A vector of integers of dimension GeometryInfo<dim>::faces_per_cell with the following meaning: the legs of the cross are stacked on the faces of the center cell, in the usual order of deal.II cells, namely first $-x$, then $x$, then $-y$ and so on. The corresponding entries in sizes name the number of cells stacked on this face. All numbers may be zero, thus L- and T-shaped domains are specializations of this domain.
          colorize_cells: If colorization is enabled, then the material id of a cell corresponds to the leg it is in. The id of the center cell is zero, and then the legs are numbered starting at one (see the glossary entry on colorization).
          @@ -1726,7 +1726,7 @@
        3. 96 for the rhombic dodecahedron refined once. This choice dates from an older version of deal.II before the Manifold classes were implemented: today this choice is equivalent to the rhombic dodecahedron after performing one global refinement.
        4. Numbers of the kind $192\times 2^m$ with $m\geq 0$ integer. This choice is similar to the 24 and 48 cell cases, but provides additional refinements in azimuthal direction combined with a single layer in radial direction. The base mesh is either the 6 or 12 cell version, depending on whether $m$ in the power is odd or even, respectively.
        5. The versions with 24, 48, and $192\times 2^m$ cells are useful if the shell is thin and the radial lengths should be made more similar to the circumferential lengths.

          The 3d grids with 12 and 96 cells are plotted below:

          @@ -1878,7 +1878,7 @@ const bool colorize = false

          Produce a domain that is the intersection between a hyper-shell with given inner and outer radius, i.e. the space between two circles in two space dimensions and the region between two spheres in 3d, and the positive quadrant (in 2d) or octant (in 3d). In 2d, this is indeed a quarter of the full annulus, while the function is a misnomer in 3d because there the domain is not a quarter but one eighth of the full shell.

          If the number of initial cells is zero (as is the default), then it is computed adaptively such that the resulting elements have the least aspect ratio in 2d.

          If colorize is set to true, the inner, outer, left, and right boundary get indicator 0, 1, 2, and 3 in 2d, respectively. Otherwise all indicators are set to 0. In 3d indicator 2 is at the face $x=0$, 3 at $y=0$, 4 at $z=0$ (see the glossary entry on colorization).

          All manifold ids are set to zero, and a SphericalManifold is attached to the triangulation.

          Precondition
          The triangulation passed as argument needs to be empty when calling this function.
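
          For illustration, a minimal sketch (assuming this block documents GridGenerator::quarter_hyper_shell):

          Triangulation<2> tria;
          GridGenerator::quarter_hyper_shell(tria, Point<2>(),
                                             /*inner_radius=*/0.5,
                                             /*outer_radius=*/1.,
                                             /*n_cells=*/0,
                                             /*colorize=*/true);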
          @@ -1968,7 +1968,7 @@
      Produce the volume or surface mesh of a torus. The axis of the torus is the $y$-axis while the plane of the torus is the $x$- $z$ plane.

      If dim is 3, the mesh will be the volume of the torus, using a mesh equivalent to the circle in the poloidal coordinates with 5 cells on the cross section. This function attaches a TorusManifold to all boundary faces which are marked with a manifold id of 1, a CylindricalManifold to the interior cells and all their faces which are marked with a manifold id of 2 (representing a flat state within the poloidal coordinates), and a TransfiniteInterpolationManifold to the cells between the TorusManifold on the surface and the ToroidalManifold in the center, with cells marked with manifold id 0.

      An example for the case if dim is 3 with a cut through the domain at $z=0$, 6 toroidal cells, $R=2$ and $r=0.5$ without any global refinement is given here:
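
      For illustration, a minimal sketch matching the example values above (assuming this block documents GridGenerator::torus; the default of 6 toroidal cells is used):

      Triangulation<3> tria;
      GridGenerator::torus(tria, /*R=*/2., /*r=*/0.5);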

      @@ -2103,7 +2103,7 @@

      where skewness is a parameter controlling the shell spacing in the radial direction: values of skewness close to zero correspond to even spacing, while larger values of skewness (such as $2$ or $3$) correspond to shells biased to the inner radius.

      n_cells_per_shell is the same as in GridGenerator::hyper_shell: in 2d the default choice of zero will result in 8 cells per shell (and 12 in 3d). The only valid values in 3d are 6 (the default), 12, and 96 cells: see the documentation of GridGenerator::hyper_shell for more information.

      If colorize is true then the outer boundary of the merged shells has a boundary id of $1$ and the inner boundary has a boundary id of $0$.

      Example: The following code (see, e.g., step-10 for instructions on how to visualize GNUPLOT output)
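
      A hedged sketch of such a call (assuming this block documents GridGenerator::concentric_hyper_shells; the argument values are illustrative only):

      Triangulation<2> tria;
      GridGenerator::concentric_hyper_shells(tria, Point<2>(),
                                             /*inner_radius=*/1.,
                                             /*outer_radius=*/2.,
                                             /*n_shells=*/4,
                                             /*skewness=*/2.);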

      @@ -2611,9 +2611,9 @@
      Extrude the Triangulation input in the $z$ direction from $z = 0$ to $z = \text{height}$ and store it in result. This is done by replicating the input triangulation n_slices times in $z$ direction, and then forming (n_slices-1) layers of cells out of these replicates.

      The boundary indicators of the faces of input will be assigned to the corresponding side walls in $z$ direction. The bottom and top get the next two free boundary indicators: i.e., if input has boundary ids of $0$, $1$, and $42$, then the $z = 0$ boundary id of result will be $43$ and the $z = \text{height}$ boundary id will be $44$.
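
      For illustration, a minimal sketch (assuming this block documents GridGenerator::extrude_triangulation):

      Triangulation<2> input;
      GridGenerator::hyper_cube(input);
      Triangulation<3> result;
      GridGenerator::extrude_triangulation(input, /*n_slices=*/3, /*height=*/1., result);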

      This function does not, by default, copy manifold ids. The reason for this is that there is no way to set the manifold ids on the lines of the resulting Triangulation without more information: for example, if two faces of input with different manifold ids meet at a shared vertex then there is no a priori reason to pick one manifold id or another for the lines created in result that are parallel to the $z$-axis and pass through that point. If copy_manifold_ids is true then this function sets line manifold ids by picking the one that appears first in manifold_priorities. For example: if manifold_priorities is {0, 42, numbers::flat_manifold_id} and the line under consideration is adjacent to faces with manifold ids of 0 and 42, then that line will have a manifold id of 0. The correct ordering is almost always

      1. manifold ids set on the boundary,
      2. @@ -2908,7 +2908,7 @@ const bool colorize = false

        Initialize the given triangulation with a hypercube (square in 2d and cube in 3d) consisting of repetitions cells in each direction. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

        Note
        This function connects internally 4/8 vertices to quadrilateral/hexahedral cells and subdivides these into 2/5 triangular/tetrahedral cells.

        Also see Simplex support.
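
        For illustration, a hedged sketch (the function name subdivided_hyper_cube_with_simplices is an assumption based on the description above, not stated in this documentation):

        Triangulation<2> tria;
        GridGenerator::subdivided_hyper_cube_with_simplices(tria, /*repetitions=*/4);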

        /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 2024-01-30 03:04:49.624857854 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 2024-01-30 03:04:49.624857854 +0000 @@ -216,7 +216,7 @@

      As an example, with no coarsening, setting top_fraction_of_cells to 1/3 will result in approximately doubling the number of cells in two dimensions. That is because each of these 1/3 of cells will be replaced by its four children, resulting in $4\times \frac 13 N$ cells, whereas the remaining 2/3 of cells remain untouched – thus yielding a total of $4\times \frac 13 N + \frac 23 N = 2N$ cells. The same effect in three dimensions is achieved by refining 1/7th of the cells. These values are therefore frequently used because they ensure that the cost of computations on subsequent meshes grows sufficiently quickly that the fraction of time spent on the coarse meshes is not too large. On the other hand, the fractions are small enough that mesh adaptation does not refine too many cells in each step.

      Note
      This function only sets the coarsening and refinement flags. The mesh is not changed until you call Triangulation::execute_coarsening_and_refinement().
      Parameters
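
      For illustration, a minimal sketch (assuming this block documents GridRefinement::refine_and_coarsen_fixed_number; criteria holds per-cell error indicators):

      Vector<float> criteria(tria.n_active_cells());
      // ... fill criteria with error indicators, e.g. from KellyErrorEstimator ...
      GridRefinement::refine_and_coarsen_fixed_number(tria, criteria,
                                                      /*top_fraction_of_cells=*/1. / 3.,
                                                      /*bottom_fraction_of_cells=*/0.);
      tria.execute_coarsening_and_refinement();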
      @@ -276,14 +276,14 @@

      This function provides a strategy to mark cells for refinement and coarsening with the goal of controlling the reduction of the error estimate.

      Also known as the bulk criterion or Dörfler marking, this function computes the thresholds for refinement and coarsening such that the criteria of cells getting flagged for refinement make up a certain fraction of the total error. We explain its operation for refinement; coarsening works analogously.

      Let $c_K$ be the criterion of cell $K$. Then the total error estimate is computed by the formula

      \[
        E = \sum_{K\in \cal T} c_K.
      \]

      If $0 < a < 1$ is top_fraction, then we refine the smallest subset $\cal M$ of the Triangulation $\cal T$ such that

      \[
        a E \le \sum_{K\in \cal M} c_K
      \]

      The algorithm is performed by the greedy algorithm described in refine_and_coarsen_fixed_number().

      Note
      The often used formula with squares on the left and right is recovered by actually storing the square of $c_K$ in the vector criteria.
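
      For illustration, a minimal sketch of Dörfler marking (assuming this block documents GridRefinement::refine_and_coarsen_fixed_fraction; tria and criteria as in the previous sketch):

      GridRefinement::refine_and_coarsen_fixed_fraction(tria, criteria,
                                                        /*top_fraction=*/0.8,
                                                        /*bottom_fraction=*/0.02);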
      @@ -326,32 +326,32 @@
      const unsigned int order = 2

      This function flags cells of a triangulation for refinement with the aim to reach a grid that is optimal with respect to an objective function that tries to balance reducing the error and increasing the numerical cost when the mesh is refined. Specifically, this function makes the assumption that if you refine a cell $K$ with error indicator $\eta_K$ provided by the second argument to this function, then the error on the children (for all children together) will only be $2^{-\text{order}}\eta_K$ where order is the third argument of this function. This makes the assumption that the error is only a local property on a mesh and can be reduced by local refinement – an assumption that is true for the interpolation operator, but not for the usual Galerkin projection, although it is approximately true for elliptic problems where the Greens function decays quickly and the error here is not too much affected by a too coarse mesh somewhere else.

      With this, we can define the objective function this function tries to optimize. Let us assume that the mesh currently has $N_0$ cells. Then, if we refine the $m$ cells with the largest errors, we expect to get (in $d$ space dimensions)

      \[
        N(m) = (N_0-m) + 2^d m = N_0 + (2^d-1)m
      \]

      cells ( $N_0-m$ cells are not refined, and each of the $m$ cells we refine yields $2^d$ child cells). On the other hand, with refining $m$ cells, and using the assumptions above, we expect that the error will be

      \[
        \eta^\text{exp}(m)
        =
        \sum_{K, K\; \text{will not be refined}} \eta_K
        +
        \sum_{K, K\; \text{will be refined}} 2^{-\text{order}}\eta_K
      \]

      where the first sum extends over $N_0-m$ cells and the second over the $m$ cells that will be refined. Note that $N(m)$ is an increasing function of $m$ whereas $\eta^\text{exp}(m)$ is a decreasing function.

      This function then tries to find that number $m$ of cells to mark for refinement for which the objective function

      \[
        J(m) = N(m)^{\text{order}/d} \eta^\text{exp}(m)
      \]

      is minimal.

      The rationale for this function is two-fold. First, compared to the refine_and_coarsen_fixed_fraction() and refine_and_coarsen_fixed_number() functions, this function has the property that if all refinement indicators are the same (i.e., we have achieved a mesh where the error per cell is equilibrated), then the entire mesh is refined. This is based on the observation that a mesh with equilibrated error indicators is the optimal mesh (i.e., has the least overall error) among all meshes with the same number of cells. (For proofs of this, see R. Becker, M. Braack, R. Rannacher: "Numerical simulation of laminar flames at low Mach number with adaptive finite elements", Combustion Theory and Modelling, Vol. 3, Nr. 3, p. 503-534 1999; and W. Bangerth, R. Rannacher: "Adaptive Finite Element Methods for Differential Equations", Birkhauser, 2003.)

      Second, the function uses the observation that ideally, the error behaves like $e \approx c N^{-\alpha}$ with some constant $\alpha$ that depends on the dimension and the finite element degree. It should – given optimal mesh refinement – not depend so much on the regularity of the solution, as it is based on the idea that all singularities can be resolved by refinement. Mesh refinement is then based on the idea that we want to make $c=e N^\alpha$ small. This corresponds to the functional $J(m)$ above.

      Note
      This function was originally implemented by Thomas Richter. It follows a strategy described in [Richter2005]. See in particular Section 4.3, pp. 42-43.

      Definition at line 448 of file grid_refinement.cc.
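
      For illustration, a minimal sketch of a call (assuming this block documents GridRefinement::refine_and_coarsen_optimize; order 2 is the documented default):

      GridRefinement::refine_and_coarsen_optimize(tria, criteria, /*order=*/2);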

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 2024-01-30 03:04:49.740858821 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 2024-01-30 03:04:49.740858821 +0000 @@ -510,8 +510,8 @@
      Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1 \; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature. This version of the function uses a linear mapping to compute the JxW values on each cell.

      If the triangulation is a dim-dimensional one embedded in a higher dimensional space of dimension spacedim, then the value returned is the dim-dimensional measure. For example, for a two-dimensional triangulation in three-dimensional space, the value returned is the area of the surface so described. (This obviously makes sense since the spacedim-dimensional measure of a dim-dimensional triangulation would always be zero if dim < spacedim).

      This function also works for objects of type parallel::distributed::Triangulation, in which case the function is a collective operation.

      Parameters
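
      For illustration, a minimal sketch (assuming this block documents GridTools::volume):

      const double measure = GridTools::volume(tria);
      // Or, using the overload documented next, with a user-supplied mapping:
      // const double measure = GridTools::volume(tria, mapping);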
      @@ -543,8 +543,8 @@ const Mapping< dim, spacedim > & mapping

      Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1 \; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature for which we use the mapping argument.

      If the triangulation is a dim-dimensional one embedded in a higher dimensional space of dimension spacedim, then the value returned is the dim-dimensional measure. For example, for a two-dimensional triangulation in three-dimensional space, the value returned is the area of the surface so described. (This obviously makes sense since the spacedim-dimensional measure of a dim-dimensional triangulation would always be zero if dim < spacedim.)

      This function also works for objects of type parallel::distributed::Triangulation, in which case the function is a collective operation.

      Parameters
      @@ -675,8 +675,8 @@
      This function computes an affine approximation of the map from the unit coordinates to the real coordinates of the form $p_\text{real} = A p_\text{unit} + b $ by a least squares fit of this affine function to the $2^\text{dim}$ vertices representing a quadrilateral or hexahedral cell in spacedim dimensions. The result is returned as a pair with the matrix A as the first argument and the vector b describing the distance of the plane to the origin.

      For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping, even in cases where the actual transformation by a bi-/trilinear or higher order mapping might be singular. The result is exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

      This approximation underlies the function TriaAccessor::real_to_unit_cell_affine_approximation().

      For exact transformations to the unit cell, use Mapping::transform_real_to_unit_cell().
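
      A heavily hedged sketch (the name GridTools::affine_cell_approximation and the way the vertices are gathered are assumptions, not stated in this documentation):

      std::vector<Point<2>> vertices;
      for (const unsigned int v : cell->vertex_indices())
        vertices.push_back(cell->vertex(v));
      const auto [A, b] =
        GridTools::affine_cell_approximation<2, 2>(make_array_view(vertices));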

      @@ -707,7 +707,7 @@ const Quadrature< dim > & quadrature

      Computes an aspect ratio measure for all locally-owned active cells and fills a vector with one entry per cell, given a triangulation and mapping. The size of the vector that is returned equals the number of active cells. The vector contains zero for non locally-owned cells. The aspect ratio of a cell is defined as the ratio of the maximum to minimum singular value of the Jacobian, taking the maximum over all quadrature points of a quadrature rule specified via quadrature. For example, for the special case of rectangular elements in 2d with dimensions $a$ and $b$ ( $a \geq b$), this function returns the usual aspect ratio definition $a/b$. The above definition using singular values is a generalization to arbitrarily deformed elements. This function is intended to be used for $d=2,3$ space dimensions, but it can also be used for $d=1$ returning a value of 1.

      Note
      Inverted elements do not throw an exception. Instead, a value of inf is written into the vector in case of inverted elements.
      Make sure to use enough quadrature points for a precise calculation of the aspect ratio in case of deformed elements.
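
      For illustration, a minimal sketch (assuming this block documents GridTools::compute_aspect_ratio_of_cells; mapping and tria are assumed to exist):

      const Vector<double> aspect_ratios =
        GridTools::compute_aspect_ratio_of_cells(mapping, tria, QGauss<2>(2));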
      @@ -881,7 +881,7 @@ const double tol = 1e-12

      Remove vertices that are duplicated, due to the input of a structured grid, for example. If these vertices are not removed, the faces bounded by these vertices become part of the boundary, even if they are in the interior of the mesh.

      This function is called by some GridIn::read_* functions. Only the vertices with indices in considered_vertices are tested for equality. This speeds up the algorithm, which is, for worst-case hyper cube geometries $O(N^{3/2})$ in 2d and $O(N^{5/3})$ in 3d: quite slow. However, if you wish to consider all vertices, simply pass an empty vector. In that case, the function fills considered_vertices with all vertices.

      Two vertices are considered equal if their difference in each coordinate direction is less than tol. This implies that nothing happens if the tolerance is set to zero.

      Definition at line 761 of file grid_tools.cc.

      @@ -1016,7 +1016,7 @@ Triangulation< dim, spacedim > & triangulation

      Transform the vertices of the given triangulation by applying the function object provided as first argument to all its vertices.

      The transformation given as argument is used to transform each vertex. Its respective type has to offer a function-like syntax, i.e. the predicate is either an object of a type that has an operator(), or it is a pointer to a non-member function, or it is a lambda function object. In either case, argument and return value have to be of type Point<spacedim>. An example – a simple transformation that moves the object two units to the right in the $x_1$ direction – could look like as follows:

      ... // fill triangulation with something
      GridTools::transform(
        [](const Point<dim> &p) { Point<dim> q = p; q[0] += 2; return q; },
        triangulation);
      @@ -1197,13 +1197,13 @@ const bool solve_for_absolute_positions = false

      Transform the given triangulation smoothly to a different domain where, typically, each of the vertices at the boundary of the triangulation is mapped to the corresponding points in the new_points map.

      The unknown displacement field $u_d(\mathbf x)$ in direction $d$ is obtained from the minimization problem

      \[ \min\, \int \frac{1}{2}
        c(\mathbf x)
        \mathbf \nabla u_d(\mathbf x) \cdot
        \mathbf \nabla u_d(\mathbf x)
        \,\rm d x
      \]

      subject to prescribed constraints. The minimizer is obtained by solving the Laplace equation of the dim components of a displacement field that maps the current domain into one described by new_points . Linear finite elements with four Gaussian quadrature points in each direction are used. The difference between the vertex positions specified in new_points and their current value in tria therefore represents the prescribed values of this displacement field at the boundary of the domain, or more precisely at all of those locations for which new_points provides values (which may be at part of the boundary, or even in the interior of the domain). The function then evaluates this displacement field at each unconstrained vertex and uses it to place the mapped vertex where the displacement field locates it. Because the solution of the Laplace equation is smooth, this guarantees a smooth mapping from the old domain to the new one.

      Parameters
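
      For illustration, a minimal sketch (assuming this block documents GridTools::laplace_transform; moving a single boundary vertex):

      std::map<unsigned int, Point<2>> new_points;
      new_points[0] = Point<2>(0.1, 0.);   // new position of vertex 0
      GridTools::laplace_transform(new_points, tria);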
      @@ -2984,7 +2984,7 @@

      This function does the same as the previous one, i.e. it partitions a triangulation using a partitioning algorithm into a number of subdomains identified by the cell->subdomain_id() flag.

      The difference to the previous function is the second argument, a sparsity pattern that represents the connectivity pattern between cells.

      While the function above builds it directly from the triangulation by considering which cells neighbor each other, this function can take a more refined connectivity graph. The sparsity pattern needs to be of size $N\times N$, where $N$ is the number of active cells in the triangulation. If the sparsity pattern contains an entry at position $(i,j)$, then this means that cells $i$ and $j$ (in the order in which they are traversed by active cell iterators) are to be considered connected; the partitioning algorithm will then try to partition the domain in such a way that (i) the subdomains are of roughly equal size, and (ii) a minimal number of connections are broken.

      This function is mainly useful in cases where connections between cells exist that are not present in the triangulation alone (otherwise the previous function would be the simpler one to use). Such connections may include that certain parts of the boundary of a domain are coupled through symmetric boundary conditions or integrals (e.g. friction contact between the two sides of a crack in the domain), or if a numerical scheme is used that not only connects immediate neighbors but a larger neighborhood of cells (e.g. when solving integral equations).

      In addition, this function may be useful in cases where the default sparsity pattern is not entirely sufficient. This can happen because the default is to just consider face neighbors, not neighboring cells that are connected by edges or vertices. While the latter couple when using continuous finite elements, they are typically still closely connected in the neighborship graph, and the partitioning algorithm will not usually cut important connections in this case. However, if there are vertices in the mesh where many cells (many more than the common 4 or 6 in 2d and 3d, respectively) come together, then there will be a significant number of cells that are connected across a vertex, but several degrees removed in the connectivity graph built only using face neighbors. In a case like this, the partitioning algorithm may sometimes make bad decisions and you may want to build your own connectivity graph.

      Note
      If the weight signal has been attached to the triangulation, then this will be used and passed to the partitioner.
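
      For illustration, a minimal sketch (assuming this block documents the GridTools::partition_triangulation overload taking a connectivity pattern):

      DynamicSparsityPattern connectivity(tria.n_active_cells(),
                                          tria.n_active_cells());
      GridTools::get_face_connectivity_of_cells(tria, connectivity);
      // ... add extra entries for couplings not visible in the mesh itself ...
      SparsityPattern cell_connection_graph;
      cell_connection_graph.copy_from(connectivity);
      GridTools::partition_triangulation(/*n_partitions=*/4,
                                         cell_connection_graph, tria);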
      @@ -3511,7 +3511,7 @@

      An orthogonal equality test for faces.

      face1 and face2 are considered equal, if a one to one matching between its vertices can be achieved via an orthogonal equality relation.

      Here, two vertices v_1 and v_2 are considered equal, if $M\cdot v_1 + offset - v_2$ is parallel to the unit vector in unit direction direction. If the parameter matrix is a reference to a spacedim x spacedim matrix, $M$ is set to matrix, otherwise $M$ is the identity matrix.

      If the matching was successful, the relative orientation of face1 with respect to face2 is returned in the bitset orientation, where

      orientation[0] -> face_orientation
      orientation[1] -> face_flip
      orientation[2] -> face_rotation
      @@ -3626,8 +3626,8 @@

      This function tries to match all faces belonging to the first boundary with faces belonging to the second boundary with the help of orthogonal_equality().

      The bitset that is returned inside of PeriodicFacePair encodes the relative orientation of the first face with respect to the second face, see the documentation of orthogonal_equality() for further details.

      The direction refers to the space direction in which periodicity is enforced. When matching periodic faces this vector component is ignored.

      The offset is a vector tangential to the faces that is added to the location of vertices of the 'first' boundary when attempting to match them to the corresponding vertices of the 'second' boundary. This can be used to implement conditions such as $u(0,y)=u(1,y+1)$.

      Optionally, a $dim\times dim$ rotation matrix can be specified that describes how vector valued DoFs of the first face should be modified prior to constraining to the DoFs of the second face. The matrix is used in two places. First, matrix will be supplied to orthogonal_equality() and used for matching faces: Two vertices $v_1$ and $v_2$ match if $\text{matrix}\cdot v_1 + \text{offset} - v_2$ is parallel to the unit vector in unit direction direction. (For more details see DoFTools::make_periodicity_constraints(), the glossary entry on periodic conditions and step-45). Second, matrix will be stored in the PeriodicFacePair collection matched_pairs for further use.

      Template Parameters
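
      For illustration, a minimal sketch (assuming this block documents GridTools::collect_periodic_faces, matching boundary ids 0 and 1 in the $x$ direction):

      std::vector<GridTools::PeriodicFacePair<
        typename Triangulation<2>::cell_iterator>>
        matched_pairs;
      GridTools::collect_periodic_faces(tria,
                                        /*b_id1=*/0,
                                        /*b_id2=*/1,
                                        /*direction=*/0,
                                        matched_pairs);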
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 2024-01-30 03:04:49.764859021 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 2024-01-30 03:04:49.764859021 +0000 @@ -127,9 +127,9 @@

      The namespace L2 contains functions for mass matrices and L2-inner products.

      Notational conventions

      In most cases, the action of a function in this namespace can be described by a single integral. We distinguish between integrals over cells Z and over faces F. If an integral is denoted as

      \[
        \int_Z u \otimes v \,dx,
      \]

      it will yield the following results, depending on the type of operation

      • @@ -139,7 +139,7 @@
      • If the function returns a number, then this number is the integral of the two given functions u and v.
      We will use regular cursive symbols $u$ for scalars and bold symbols $\mathbf u$ for vectors. Test functions are always v and trial functions are always u. Parameters are Greek and the face normal vectors are $\mathbf n = \mathbf n_1 = -\mathbf n_2$.

      Signature of functions

      Functions in this namespace follow a generic signature. In the simplest case, you have two related functions

      template <int dim>
      void
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 2024-01-30 03:04:49.792859254 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 2024-01-30 03:04:49.792859254 +0000 @@ -162,8 +162,8 @@
      MeshType: A type that satisfies the requirements of the MeshType concept.
      const double factor = 1.

      Advection along the direction w in weak form with derivative on the test function

      \[ m_{ij} = \int_Z u_j\,(\mathbf w \cdot \nabla) v_i \, dx. \]

      The FiniteElement in fe may be scalar or vector valued. In the latter case, the advection operator is applied to each component separately.

      Parameters
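
      For illustration, a hedged sketch of a call (local_matrix, fe_values, and velocity are assumptions; see the sketch of the velocity ArrayView further below):

      LocalIntegrators::Advection::cell_matrix(local_matrix,
                                               fe_values,
                                               fe_values,
                                               velocity);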
      @@ -222,7 +222,7 @@

      Scalar advection residual operator in strong form

      \[ r_i = \int_Z (\mathbf w \cdot \nabla)u\, v_i \, dx. \]

      Warning
      This is not the residual consistent with cell_matrix(), but with its transpose.
      @@ -271,8 +271,8 @@

      Vector-valued advection residual operator in strong form

      \[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf u\bigr) \cdot\mathbf v_i \, dx. \]

      Warning
      This is not the residual consistent with cell_matrix(), but with its transpose.
      @@ -321,7 +321,7 @@

      Scalar advection residual operator in weak form

      \[ r_i = \int_Z (\mathbf w \cdot \nabla)v\, u_i \, dx. \]

      Definition at line 216 of file advection.h.

      @@ -369,8 +369,8 @@

      Vector-valued advection residual operator in weak form

      \[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf v\bigr) \cdot\mathbf u_i \, dx. \]

      Definition at line 256 of file advection.h.

      @@ -410,11 +410,11 @@
      double factor = 1.

      Upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and zero elsewhere:

      \[
        a_{ij} = \int_{\partial\Omega}
        [\mathbf w\cdot\mathbf n]_+
        u_i v_j \, ds
      \]

      The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

      The finite element can have several components, in which case each component is advected by the same velocity.
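
      For illustration, a hedged sketch of packing a constant velocity $\mathbf w = (1, 0)$ into the ArrayView form described above:

      const std::vector<std::vector<double>> velocity_data = {{1.}, {0.}};
      const ArrayView<const std::vector<double>> velocity =
        make_array_view(velocity_data);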

      @@ -468,13 +468,13 @@

      Scalar case: Residual for upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and the value of the incoming boundary condition on the inflow boundary:

      \[
        a_{ij} = \int_{\partial\Omega}
        (\mathbf w\cdot\mathbf n)
        \widehat u v_j \, ds
      \]

      Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

      The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

      The finite element can have several components, in which case each component is advected by the same velocity.

      @@ -527,13 +527,13 @@

      Vector-valued case: Residual for upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and the value of the incoming boundary condition on the inflow boundary:

      \[
        a_{ij} = \int_{\partial\Omega}
        (\mathbf w\cdot\mathbf n)
        \widehat u v_j \, ds
      \]

      Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

      The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

      The finite element can have several components, in which case each component is advected by the same velocity.

      @@ -599,13 +599,13 @@ const double factor = 1.

      Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

      \[
        a_{ij} = \int_F \left|\mathbf w
        \cdot \mathbf n\right|
        u^\uparrow
        (v^\uparrow-v^\downarrow)
        \,ds
      \]

      The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

      The finite element can have several components, in which case each component is advected the same way.

      @@ -662,13 +662,13 @@ const double factor = 1.

      Scalar case: Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

      \[
        a_{ij} = \int_F \left|\mathbf w
        \cdot \mathbf n\right|
        u^\uparrow
        (v^\uparrow-v^\downarrow)
        \,ds
      \]

      The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

      The finite element can have several components, in which case each component is advected the same way.

      @@ -725,13 +725,13 @@ const double factor = 1.

      Vector-valued case: Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

      \[
        a_{ij} = \int_F \left|\mathbf w \cdot \mathbf n\right|
        u^\uparrow (v^\uparrow-v^\downarrow) \,ds
      \]

      The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

      The finite element can have several components, in which case each component is advected the same way.
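
      The common ingredient of all of these face terms is the choice of which cell supplies the trial value; a minimal, hypothetical helper expressing that choice:

#include <deal.II/base/tensor.h>

// Pick the upwind value at a face quadrature point: if the advection
// velocity w points out of the current cell (w.n > 0), the value is
// transported from here; otherwise it comes from the neighbor.
template <int dim>
double upwind_value(const dealii::Tensor<1, dim> &w,
                    const dealii::Tensor<1, dim> &n, // outward normal of this cell
                    const double u_here,
                    const double u_there)
{
  return (w * n > 0.) ? u_here : u_there;
}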

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 2024-01-30 03:04:49.816859454 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 2024-01-30 03:04:49.816859454 +0000 @@ -157,7 +157,7 @@ double factor = 1.

      Cell matrix for divergence. The derivative is on the trial function.

      \[ \int_Z v\nabla \cdot \mathbf u \,dx \]

      This is the strong divergence operator and the trial space should be at least Hdiv. The test functions may be discontinuous.
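
      A hand-rolled equivalent of this cell matrix could look as follows (a sketch, assuming fe_u is a vector-valued FEValues object with dim components starting at component 0 and fe_v a scalar one, both reinitialized on the same cell and quadrature):

#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_values_extractors.h>
#include <deal.II/lac/full_matrix.h>

// M(i,j) += v_i (div u_j) JxW over the cell quadrature points.
template <int dim>
void divergence_cell_matrix(dealii::FullMatrix<double>  &M,
                            const dealii::FEValues<dim> &fe_u, // vector-valued trial space
                            const dealii::FEValues<dim> &fe_v) // scalar test space
{
  const dealii::FEValuesExtractors::Vector u(0);
  for (unsigned int q = 0; q < fe_u.n_quadrature_points; ++q)
    for (unsigned int i = 0; i < fe_v.dofs_per_cell; ++i)
      for (unsigned int j = 0; j < fe_u.dofs_per_cell; ++j)
        M(i, j) += fe_v.shape_value(i, q) *
                   fe_u[u].divergence(j, q) *
                   fe_u.JxW(q);
}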

      @@ -193,8 +193,8 @@ const double factor = 1.

      The residual of the divergence operator in strong form.

      \[ \int_Z v\nabla \cdot \mathbf u \,dx \]

      This is the strong divergence operator and the trial space should be at least Hdiv. The test functions may be discontinuous.

      The function cell_matrix() is the Frechet derivative of this function with respect to the test functions.

      @@ -231,8 +231,8 @@ const double factor = 1.

      The residual of the divergence operator in weak form.

      \[ - \int_Z \nabla v \cdot \mathbf u \,dx \]

      This is the weak divergence operator and the test space should be at least H1. The trial functions may be discontinuous.

      Todo
      Verify: The function cell_matrix() is the Frechet derivative of this function with respect to the test functions.
      @@ -306,8 +306,8 @@ const double factor = 1.

      The residual of the gradient operator in strong form.

      \[ \int_Z \mathbf v\cdot\nabla u \,dx \]

      This is the strong gradient operator and the trial space should be at least H1. The test functions may be discontinuous.

      The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.

      @@ -344,8 +344,8 @@ const double factor = 1.

      The residual of the gradient operator in weak form.

      \[ -\int_Z \nabla\cdot \mathbf v u \,dx \]

      This is the weak gradient operator and the test space should be at least Hdiv. The trial functions may be discontinuous.

      Todo
      Verify: The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.
      @@ -382,7 +382,7 @@ double factor = 1.

      The trace of the divergence operator, namely the product of the normal component of the vector valued trial space and the test space.

      \[ \int_F (\mathbf u\cdot \mathbf n) v \,ds \]

      Definition at line 259 of file divergence.h.

      @@ -422,9 +422,9 @@ double factor = 1.

      The trace of the divergence operator, namely the product of the normal component of the vector valued trial space and the test space.

      \[
        \int_F (\mathbf u\cdot \mathbf n) v \,ds
      \]

      Definition at line 292 of file divergence.h.

      @@ -459,9 +459,9 @@ double factor = 1.

      The trace of the gradient operator, namely the product of the normal component of the vector valued test space and the trial space.

      \[
        \int_F u (\mathbf v\cdot \mathbf n) \,ds
      \]

      Definition at line 324 of file divergence.h.

      @@ -521,10 +521,10 @@ double factor = 1.

      The trace of the divergence operator, namely the product of the jump of the normal component of the vector valued trial function and the mean value of the test function.

      \[
        \int_F (\mathbf u_1\cdot \mathbf n_1 + \mathbf u_2 \cdot \mathbf n_2)
        \frac{v_1+v_2}{2} \,ds
      \]

      Definition at line 358 of file divergence.h.

      @@ -574,12 +574,12 @@ double factor = 1.

      The jump of the normal component

      \[
        \int_F
        (\mathbf u_1\cdot \mathbf n_1 + \mathbf u_2 \cdot \mathbf n_2)
        (\mathbf v_1\cdot \mathbf n_1 + \mathbf v_2 \cdot \mathbf n_2)
        \,ds
      \]

      Definition at line 417 of file divergence.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 2024-01-30 03:04:49.840859654 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 2024-01-30 03:04:49.840859654 +0000 @@ -154,7 +154,7 @@

      The linear elasticity operator in weak form, namely double contraction of symmetric gradients.

      \[ \int_Z \varepsilon(u): \varepsilon(v)\,dx \]

      Definition at line 51 of file elasticity.h.
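
      For orientation, the same bilinear form can be assembled directly with FEValues and a vector extractor (a sketch, assuming a vector-valued element whose displacement components start at component 0):

#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_values_extractors.h>
#include <deal.II/lac/full_matrix.h>

// M(i,j) += eps(u_j) : eps(v_i) JxW, with eps the symmetric gradient.
template <int dim>
void elasticity_cell_matrix(dealii::FullMatrix<double>  &M,
                            const dealii::FEValues<dim> &fe)
{
  const dealii::FEValuesExtractors::Vector displacements(0);
  for (unsigned int q = 0; q < fe.n_quadrature_points; ++q)
    for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
      for (unsigned int j = 0; j < fe.dofs_per_cell; ++j)
        M(i, j) += fe[displacements].symmetric_gradient(i, q) *
                   fe[displacements].symmetric_gradient(j, q) *
                   fe.JxW(q);
}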

      @@ -197,7 +197,7 @@

      Vector-valued residual operator for linear elasticity in weak form

      \[ - \int_Z \varepsilon(u): \varepsilon(v) \,dx \]

      Definition at line 84 of file elasticity.h.

      @@ -239,10 +239,10 @@

      The matrix for the weak boundary condition of Nitsche type for linear elasticity:

      \[
        \int_F \Bigl(\gamma u \cdot v - n^T \epsilon(u) v - u \epsilon(v)
        n\Bigr)\;ds.
      \]

      Definition at line 123 of file elasticity.h.

      @@ -284,10 +284,10 @@

      The matrix for the weak boundary condition of Nitsche type for the tangential displacement in linear elasticity:

      \[
        \int_F \Bigl(\gamma u_\tau \cdot v_\tau - n^T \epsilon(u_\tau) v_\tau -
        u_\tau^T \epsilon(v_\tau) n\Bigr)\;ds.
      \]

      Definition at line 178 of file elasticity.h.

      @@ -337,12 +337,12 @@ double factor = 1.

      Weak boundary condition for the elasticity operator by Nitsche, namely on the face F the vector

      \[
        \int_F \Bigl(\gamma (u-g) \cdot v - n^T \epsilon(u) v - (u-g) \epsilon(v)
        n^T\Bigr)\;ds.
      \]

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

      Definition at line 257 of file elasticity.h.

      @@ -398,10 +398,10 @@

      The weak boundary condition of Nitsche type for the tangential displacement in linear elasticity:

      \[
        \int_F \Bigl(\gamma (u_\tau-g_\tau) \cdot v_\tau - n^T \epsilon(u_\tau) v
        - (u_\tau-g_\tau) \epsilon(v_\tau) n\Bigr)\;ds.
      \]

      Definition at line 309 of file elasticity.h.

      @@ -446,12 +446,12 @@ double factor = 1.

      Homogeneous weak boundary condition for the elasticity operator by Nitsche, namely on the face F the vector

      \[
        \int_F \Bigl(\gamma u \cdot v - n^T \epsilon(u) v - u \epsilon(v)
        n^T\Bigr)\;ds.
      \]

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

      Definition at line 387 of file elasticity.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 2024-01-30 03:04:49.860859821 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 2024-01-30 03:04:49.860859821 +0000 @@ -137,9 +137,9 @@ double factor = 1.

      The weak form of the grad-div operator penalizing volume changes

      \[
        \int_Z \nabla\cdot u \nabla \cdot v \,dx
      \]

      Definition at line 52 of file grad_div.h.
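
      A direct FEValues-based sketch of this form (same assumptions as before: a vector-valued element whose components start at component 0):

#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_values_extractors.h>
#include <deal.II/lac/full_matrix.h>

// M(i,j) += (div u_j)(div v_i) JxW -- penalizes volume changes.
template <int dim>
void grad_div_cell_matrix(dealii::FullMatrix<double>  &M,
                          const dealii::FEValues<dim> &fe)
{
  const dealii::FEValuesExtractors::Vector v(0);
  for (unsigned int q = 0; q < fe.n_quadrature_points; ++q)
    for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
      for (unsigned int j = 0; j < fe.dofs_per_cell; ++j)
        M(i, j) += fe[v].divergence(i, q) * fe[v].divergence(j, q) * fe.JxW(q);
}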

      @@ -174,9 +174,9 @@ const double factor = 1.

      The weak form of the grad-div residual

      \[
        \int_Z \nabla\cdot u \nabla \cdot v \,dx
      \]

      Definition at line 86 of file grad_div.h.

      @@ -218,10 +218,10 @@

      The matrix for the weak boundary condition of Nitsche type for linear elasticity:

      \[
        \int_F \Bigl(\gamma (u \cdot n)(v \cdot n) - \nabla\cdot u
        v\cdot n - u \cdot n \nabla \cdot v \Bigr)\;ds.
      \]

      Definition at line 122 of file grad_div.h.

      @@ -271,14 +271,14 @@ double factor = 1.

      Weak boundary condition for the Laplace operator by Nitsche, vector valued version, namely on the face F the vector

      \[
        \int_F \Bigl(\gamma (\mathbf u \cdot \mathbf n - \mathbf g \cdot \mathbf n) (\mathbf v \cdot \mathbf n)
        - \nabla \cdot \mathbf u (\mathbf v \cdot \mathbf n)
        - (\mathbf u-\mathbf g) \cdot \mathbf n \nabla \cdot v\Bigr)\;ds.
      \]

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

      Definition at line 174 of file grad_div.h.

      @@ -405,12 +405,12 @@ double ext_factor = -1.

      Grad-div residual term for the symmetric interior penalty method:

      \[
        \int_F \Bigl( \gamma [\mathbf u \cdot\mathbf n] \cdot [\mathbf v \cdot \mathbf n]
        - \{\nabla \cdot \mathbf u\}[\mathbf v\cdot \mathbf n]
        - [\mathbf u\times \mathbf n]\{\nabla\cdot \mathbf v\} \Bigr) \; ds.
      \]

      See for instance Hansbo and Larson, 2002

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 2024-01-30 03:04:49.880859987 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 2024-01-30 03:04:49.880859987 +0000 @@ -138,12 +138,12 @@ const double factor = 1.

      The mass matrix for scalar or vector valued finite elements.

      \[ \int_Z uv\,dx \quad \text{or} \quad \int_Z \mathbf u\cdot \mathbf v\,dx \]

      Likewise, this term can be used on faces, where it computes the integrals

      \[ \int_F uv\,ds \quad \text{or} \quad \int_F \mathbf u\cdot \mathbf v\,ds \]

      @@ -181,18 +181,18 @@
      const std::vector< double > & weights

      The weighted mass matrix for scalar or vector valued finite elements.

      \[ \int_Z \omega(x) uv\,dx \quad \text{or} \quad \int_Z \omega(x) \mathbf u\cdot \mathbf v\,dx \]

      Likewise, this term can be used on faces, where it computes the integrals

      \[ \int_F \omega(x) uv\,ds \quad \text{or} \quad \int_F \omega(x) \mathbf u\cdot \mathbf v\,ds \]

      Parameters
      M: The weighted mass matrix obtained as result.
      fe: The FEValues object describing the local trial function space. update_values and update_JxW_values must be set.
      weights: The weights, $\omega(x)$, evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
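
      A hand-written counterpart for the scalar case (a sketch; weights holds $\omega$ at the quadrature points):

#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/full_matrix.h>
#include <vector>

// M(i,j) += w(x_q) v_i v_j JxW for a scalar element.
template <int dim>
void weighted_mass_matrix(dealii::FullMatrix<double>  &M,
                          const dealii::FEValues<dim> &fe,
                          const std::vector<double>   &weights)
{
  for (unsigned int q = 0; q < fe.n_quadrature_points; ++q)
    for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
      for (unsigned int j = 0; j < fe.dofs_per_cell; ++j)
        M(i, j) += weights[q] * fe.shape_value(i, q) * fe.shape_value(j, q) * fe.JxW(q);
}
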
      @@ -230,13 +230,13 @@
      const double factor = 1.

      L2-inner product for scalar functions.

      \[ \int_Z fv\,dx \quad \text{or} \quad \int_F fv\,ds \]

      Parameters
      result: The vector obtained as result.
      fe: The FEValues object describing the local trial function space. update_values and update_JxW_values must be set.
      input: The representation of $f$ evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
      factor: A constant that multiplies the result.
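
      Assembled by hand, this inner product is a single loop (a sketch; input holds $f$ at the quadrature points):

#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/vector.h>
#include <vector>

// result(i) += factor f(x_q) v_i JxW over the cell or face quadrature.
template <int dim>
void l2_rhs(dealii::Vector<double>      &result,
            const dealii::FEValues<dim> &fe,
            const std::vector<double>   &input,
            const double                 factor = 1.)
{
  for (unsigned int q = 0; q < fe.n_quadrature_points; ++q)
    for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
      result(i) += factor * input[q] * fe.shape_value(i, q) * fe.JxW(q);
}
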
      @@ -274,14 +274,14 @@
      const double factor = 1.

      L2-inner product for a slice of a vector valued right hand side.

      \[ \int_Z \mathbf f\cdot \mathbf v\,dx \quad \text{or} \quad \int_F \mathbf f\cdot \mathbf v\,ds \]

      Parameters
      result: The vector obtained as result.
      fe: The FEValues object describing the local trial function space. update_values and update_JxW_values must be set.
      input: The vector valued representation of $\mathbf f$ evaluated at the quadrature points in the finite element (size of each component must be equal to the number of quadrature points in the element).
      factor: A constant that multiplies the result.
      @@ -338,9 +338,9 @@
      const double factor2 = 1.

      The jump matrix between two cells for scalar or vector valued finite elements. Note that the factor $\gamma$ can be used to implement weighted jumps.

      \[
        \int_F [\gamma u][\gamma v]\,ds \quad \text{or}
        \int_F [\gamma \mathbf u]\cdot [\gamma \mathbf v]\,ds
      \]

      Using appropriate weights, this term can be used to penalize violation of conformity in H1.

      Note that for the parameters that follow, the external matrix refers to the flux between cells, while the internal matrix refers to entries coupling inside the cell.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 2024-01-30 03:04:49.908860220 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 2024-01-30 03:04:49.908860220 +0000 @@ -152,8 +152,8 @@
      const double factor = 1.

      Laplacian in weak form, namely on the cell Z the matrix

      \[ \int_Z \nu \nabla u \cdot \nabla v \, dx. \]

      The FiniteElement in fe may be scalar or vector valued. In the latter case, the Laplacian is applied to each component separately.
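
      In the scalar case this is the textbook stiffness matrix; assembled by hand it reads (a sketch):

#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/full_matrix.h>

// M(i,j) += nu grad(v_i) . grad(u_j) JxW for a scalar element.
template <int dim>
void laplace_cell_matrix(dealii::FullMatrix<double>  &M,
                         const dealii::FEValues<dim> &fe,
                         const double                 nu = 1.)
{
  for (unsigned int q = 0; q < fe.n_quadrature_points; ++q)
    for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
      for (unsigned int j = 0; j < fe.dofs_per_cell; ++j)
        M(i, j) += nu * fe.shape_grad(i, q) * fe.shape_grad(j, q) * fe.JxW(q);
}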

      @@ -197,7 +197,7 @@

      Laplacian residual operator in weak form

      \[ \int_Z \nu \nabla u \cdot \nabla v \, dx. \]

      Definition at line 92 of file laplace.h.

      @@ -240,7 +240,7 @@

      Vector-valued Laplacian residual operator in weak form

      \[ \int_Z \nu \nabla u : \nabla v \, dx. \]

      Definition at line 119 of file laplace.h.

      @@ -275,11 +275,11 @@
      double factor = 1.

      Weak boundary condition of Nitsche type for the Laplacian, namely on the face F the matrix

      \[
        \int_F \Bigl(\gamma u v - \partial_n u v - u \partial_n v\Bigr)\;ds.
      \]

      Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

      Definition at line 157 of file laplace.h.
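
      A by-hand sketch of this face matrix (gamma plays the role of the penalty parameter, cf. compute_penalty(); fe is an FEFaceValues object with values, gradients, normal vectors and JxW updated):

#include <deal.II/base/tensor.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/full_matrix.h>

// M(i,j) += (gamma u_j v_i - dn(u_j) v_i - u_j dn(v_i)) JxW on a boundary face.
template <int dim>
void nitsche_boundary_matrix(dealii::FullMatrix<double>      &M,
                             const dealii::FEFaceValues<dim> &fe,
                             const double                     gamma)
{
  for (unsigned int q = 0; q < fe.n_quadrature_points; ++q)
    {
      const dealii::Tensor<1, dim> n = fe.normal_vector(q);
      for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
        for (unsigned int j = 0; j < fe.dofs_per_cell; ++j)
          M(i, j) += (gamma * fe.shape_value(j, q) * fe.shape_value(i, q) -
                      (fe.shape_grad(j, q) * n) * fe.shape_value(i, q) -
                      fe.shape_value(j, q) * (fe.shape_grad(i, q) * n)) *
                     fe.JxW(q);
    }
}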

      @@ -313,12 +313,12 @@
      double factor = 1.

      Weak boundary condition of Nitsche type for the Laplacian applied to the tangential component only, namely on the face F the matrix

      \[
        \int_F \Bigl(\gamma u_\tau v_\tau - \partial_n u_\tau v_\tau - u_\tau
        \partial_n v_\tau\Bigr)\;ds.
      \]

      Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

      Definition at line 198 of file laplace.h.

      @@ -367,12 +367,12 @@
      double factor = 1.

      Weak boundary condition for the Laplace operator by Nitsche, scalar version, namely on the face F the vector

      \[
        \int_F \Bigl(\gamma (u-g) v - \partial_n u v - (u-g) \partial_n
        v\Bigr)\;ds.
      \]

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

      Definition at line 261 of file laplace.h.

      @@ -421,13 +421,13 @@
      double factor = 1.

      Weak boundary condition for the Laplace operator by Nitsche, vector valued version, namely on the face F the vector

      \[
        \int_F \Bigl(\gamma (\mathbf u- \mathbf g) \cdot \mathbf v
        - \partial_n \mathbf u \cdot \mathbf v
        - (\mathbf u-\mathbf g) \cdot \partial_n \mathbf v\Bigr)\;ds.
      \]

      Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

      Definition at line 308 of file laplace.h.

      @@ -486,10 +486,10 @@
      double factor2 = -1.

      Flux for the interior penalty method for the Laplacian, namely on the face F the matrices associated with the bilinear form

      \[
        \int_F \Bigl( \gamma [u][v] - \{\nabla u\}[v\mathbf n] - [u\mathbf
        n]\{\nabla v\} \Bigr) \; ds.
      \]

      The penalty parameter should always be the mean value of the penalties needed for stability on each side. In the case of constant coefficients, it can be computed using compute_penalty().

      If factor2 is missing or negative, the factor is assumed the same on both sides. If factors differ, note that the penalty parameter has to be computed accordingly.
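
      A sketch of the corresponding assembly with FEInterfaceValues, following the pattern of the step-74 tutorial (treat this as illustrative rather than as the library's own implementation):

#include <deal.II/base/tensor.h>
#include <deal.II/fe/fe_interface_values.h>
#include <deal.II/lac/full_matrix.h>

// One matrix over all interface dofs of the two adjacent cells:
// M(i,j) += (gamma [u_j][v_i] - {grad u_j}.n [v_i] - [u_j] {grad v_i}.n) JxW.
template <int dim>
void sip_face_matrix(dealii::FullMatrix<double>           &M,
                     const dealii::FEInterfaceValues<dim> &fe_iv,
                     const double                          gamma)
{
  const unsigned int n_dofs = fe_iv.n_current_interface_dofs();
  for (const unsigned int q : fe_iv.quadrature_point_indices())
    {
      const dealii::Tensor<1, dim> n = fe_iv.normal(q);
      for (unsigned int i = 0; i < n_dofs; ++i)
        for (unsigned int j = 0; j < n_dofs; ++j)
          M(i, j) += (gamma * fe_iv.jump_in_shape_values(j, q) *
                        fe_iv.jump_in_shape_values(i, q) -
                      (fe_iv.average_of_shape_gradients(j, q) * n) *
                        fe_iv.jump_in_shape_values(i, q) -
                      fe_iv.jump_in_shape_values(j, q) *
                        (fe_iv.average_of_shape_gradients(i, q) * n)) *
                     fe_iv.JxW(q);
    }
}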

      @@ -551,10 +551,10 @@
      double factor2 = -1.

      Flux for the interior penalty method for the Laplacian applied to the tangential components of a vector field, namely on the face F the matrices associated with the bilinear form

      \[
        \int_F \Bigl( \gamma [u_\tau][v_\tau] - \{\nabla u_\tau\}[v_\tau\mathbf
        n] - [u_\tau\mathbf n]\{\nabla v_\tau\} \Bigr) \; ds.
      \]

      Warning
      This function is still under development!
      @@ -625,10 +625,10 @@
      double ext_factor = -1.

      Residual term for the symmetric interior penalty method:

      \[
        \int_F \Bigl( \gamma [u][v] - \{\nabla u\}[v\mathbf n] - [u\mathbf
        n]\{\nabla v\} \Bigr) \; ds.
      \]

      Definition at line 544 of file laplace.h.

      @@ -698,11 +698,11 @@
      double ext_factor = -1.

      Vector-valued residual term for the symmetric interior penalty method:

      \[
        \int_F \Bigl( \gamma [\mathbf u]\cdot[\mathbf v]
        - \{\nabla \mathbf u\}[\mathbf v\otimes \mathbf n]
        - [\mathbf u\otimes \mathbf n]\{\nabla \mathbf v\} \Bigr) \; ds.
      \]

      Definition at line 611 of file laplace.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 2024-01-30 03:04:49.928860387 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 2024-01-30 03:04:49.928860387 +0000 @@ -120,22 +120,22 @@

      Local integrators related to curl operators and their traces.

      We use the following conventions for curl operators. First, in three space dimensions

      \[
        \nabla\times \mathbf u = \begin{pmatrix}
          \partial_2 u_3 - \partial_3 u_2 \\
          \partial_3 u_1 - \partial_1 u_3 \\
          \partial_1 u_2 - \partial_2 u_1
        \end{pmatrix}.
      \]

      In two space dimensions, the curl is obtained by extending a vector u to $(u_1, u_2, 0)^T$ and a scalar p to $(0,0,p)^T$. Computing the nonzero components, we obtain the scalar curl of a vector function and the vector curl of a scalar function. The current implementation exchanges the sign and we have:

      \[
        \nabla \times \mathbf u = \partial_1 u_2 - \partial_2 u_1,
        \qquad
        \nabla \times p = \begin{pmatrix}
          \partial_2 p \\ -\partial_1 p
        \end{pmatrix}
      \]
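
      In code, these conventions amount to the following small helpers (a sketch; grad[i][j] is taken to mean $\partial_j u_i$):

#include <deal.II/base/tensor.h>

// 3d curl of a vector field from its gradient tensor, grad[i][j] = d_j u_i.
inline dealii::Tensor<1, 3> curl3d(const dealii::Tensor<2, 3> &grad)
{
  dealii::Tensor<1, 3> c;
  c[0] = grad[2][1] - grad[1][2]; // d_2 u_3 - d_3 u_2
  c[1] = grad[0][2] - grad[2][0]; // d_3 u_1 - d_1 u_3
  c[2] = grad[1][0] - grad[0][1]; // d_1 u_2 - d_2 u_1
  return c;
}

// 2d scalar curl of a vector field.
inline double curl2d(const dealii::Tensor<2, 2> &grad)
{
  return grad[1][0] - grad[0][1]; // d_1 u_2 - d_2 u_1
}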

      Function Documentation

      @@ -161,7 +161,7 @@
      const Tensor< 2, dim > & h2

      Auxiliary function. Given the tensors of dim second derivatives, compute the curl of the curl of a vector function. The result in two and three dimensions is:

      \[
        \nabla\times\nabla\times \mathbf u = \begin{pmatrix}
        \partial_1\partial_2 u_2 - \partial_2^2 u_1 \\
        \partial_1\partial_2 u_1 - \partial_1^2 u_2
        \end{pmatrix}
        \quad\text{and}\quad
        \nabla\times\nabla\times \mathbf u = \begin{pmatrix}
        \partial_1\partial_2 u_2 + \partial_1\partial_3 u_3
        - (\partial_2^2+\partial_3^2) u_1 \\
        \partial_2\partial_3 u_3 + \partial_2\partial_1 u_1
        - (\partial_3^2+\partial_1^2) u_2 \\
        \partial_3\partial_1 u_1 + \partial_3\partial_2 u_2
        - (\partial_1^2+\partial_2^2) u_3
        \end{pmatrix}
      \]

      Note
      The third tensor argument is not used in two dimensions and can for instance duplicate one of the previous.
      @@ -211,9 +211,9 @@
      const Tensor< 1, dim > & normal

      Auxiliary function. Given dim tensors of first derivatives and a normal vector, compute the tangential curl

      \[
        \mathbf n \times \nabla \times u.
      \]

      Note
      The third tensor argument is not used in two dimensions and can for instance duplicate one of the previous.
      @@ -244,10 +244,10 @@
      const double factor = 1.

      The curl-curl operator

      \[
        \int_Z \nabla\times u \cdot \nabla \times v \,dx
      \]

      in weak form.

      @@ -283,9 +283,9 @@
      double factor = 1.

      The matrix for the curl operator

      \[
        \int_Z \nabla \times u \cdot v \,dx.
      \]

      This is the standard curl operator in 3d and the scalar curl in 2d. The vector curl operator can be obtained by exchanging test and trial functions.

      @@ -327,14 +327,14 @@
      double factor = 1.

      The matrix for weak boundary condition of Nitsche type for the tangential component in Maxwell systems.

      \[
        \int_F \biggl( 2\gamma (u\times n) (v\times n)
        - (u\times n)(\nu \nabla\times v)
        - (v\times n)(\nu \nabla\times u) \biggr)
      \]

      Definition at line 265 of file maxwell.h.

      @@ -364,10 +364,10 @@
      double factor = 1.

      The product of two tangential traces,

      \[
        \int_F (u\times n)(v\times n) \, ds.
      \]

      Definition at line 328 of file maxwell.h.

      @@ -435,14 +435,14 @@

      The interior penalty fluxes for Maxwell systems.

      \[
        \int_F \biggl( \gamma \{u\times n\}\{v\times n\}
        - \{u\times n\}\{\nu \nabla\times v\}
        - \{v\times n\}\{\nu \nabla\times u\} \biggr)\;dx
      \]

      Definition at line 385 of file maxwell.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 2024-01-30 03:04:49.956860620 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 2024-01-30 03:04:49.960860654 +0000 @@ -162,8 +162,8 @@
      Type describing how a cell or a face is located relative to the zero contour of a level set function, $\psi$. The values of the type correspond to:

      inside if $\psi(x) < 0$, outside if $\psi(x) > 0$, intersected if $\psi(x)$ varies in sign,

      over the cell/face. The value "unassigned" is used to describe that the location of a cell/face has not yet been determined.

      @@ -229,17 +229,17 @@
      const AffineConstraints< number > & immersed_constraints = AffineConstraints<number>()

      Create a coupling sparsity pattern for non-matching, overlapping grids.

      Given two non-matching triangulations, representing the domains $\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) = \text{span}\{w_j\}_{j=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

      \[
        M_{ij} \dealcoloneq \int_{B} v_i(x) w_j(x) dx,
        \quad i \in [0,n), j \in [0,m),
      \]

      where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

      The sparsity is filled by locating the position of quadrature points (obtained by the reference quadrature quad) defined on elements of $B$ with respect to the embedding triangulation $\Omega$. For each overlapping cell, the entries corresponding to space_comps in space_dh and immersed_comps in immersed_dh are added to the sparsity pattern.

      The space_comps and immersed_comps masks are assumed to be ordered in the same way: the first component of space_comps will couple with the first component of immersed_comps, the second with the second, and so on. If one of the two masks has more non-zero entries than the other, then the excess components will be ignored.

      If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sense for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

      For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

      This function will also work in parallel, provided that the immersed triangulation is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if you use an immersed parallel::distributed::Triangulation<dim1,spacedim>.

      See the tutorial program step-60 for an example on how to use this function.
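
      A typical call sequence, loosely following step-60 (the handler and constraint names are placeholders, and the quadrature degree is an arbitrary choice):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/sparsity_pattern.h>
#include <deal.II/non_matching/coupling.h>

// space_dh lives on the embedding domain Omega, immersed_dh on B.
template <int dim0, int dim1, int spacedim>
void setup_coupling(const dealii::DoFHandler<dim0, spacedim> &space_dh,
                    const dealii::DoFHandler<dim1, spacedim> &immersed_dh,
                    const dealii::AffineConstraints<double>  &constraints,
                    dealii::SparsityPattern                  &coupling_sparsity)
{
  dealii::DynamicSparsityPattern dsp(space_dh.n_dofs(), immersed_dh.n_dofs());
  dealii::NonMatching::create_coupling_sparsity_pattern(
    space_dh, immersed_dh,
    dealii::QGauss<dim1>(3), // reference quadrature on the immersed cells
    dsp, constraints);
  coupling_sparsity.copy_from(dsp);
}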

      @@ -357,17 +357,17 @@
      const AffineConstraints< typename Matrix::value_type > & immersed_constraints = AffineConstraints<typename Matrix::value_type>()

      Create a coupling mass matrix for non-matching, overlapping grids.

      Given two non-matching triangulations, representing the domains $\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) = \text{span}\{w_j\}_{j=0}^m$, compute the coupling matrix

      \[
        M_{ij} \dealcoloneq \int_{B} v_i(x) w_j(x) dx,
        \quad i \in [0,n), j \in [0,m),
      \]

      where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

      The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern function. The elements of the matrix are computed by locating the position of quadrature points defined on elements of $B$ with respect to the embedding triangulation $\Omega$.

      The space_comps and immersed_comps masks are assumed to be ordered in the same way: the first component of space_comps will couple with the first component of immersed_comps, the second with the second, and so on. If one of the two masks has more non-zero entries than the other, then the excess components will be ignored.

      If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sense for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

      For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

      This function will also work in parallel, provided that the immersed triangulation is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if you use an immersed parallel::distributed::Triangulation<dim1,spacedim>.

      See the tutorial program step-60 for an example on how to use this function.
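
      Continuing the previous sketch, the matrix itself would then be assembled along these lines (same placeholder names and quadrature):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/non_matching/coupling.h>

// Assumes coupling_matrix was reinitialized with the sparsity pattern
// produced by the sparsity sketch above.
template <int dim0, int dim1, int spacedim>
void assemble_coupling(const dealii::DoFHandler<dim0, spacedim> &space_dh,
                       const dealii::DoFHandler<dim1, spacedim> &immersed_dh,
                       const dealii::AffineConstraints<double>  &constraints,
                       dealii::SparseMatrix<double>             &coupling_matrix)
{
  dealii::NonMatching::create_coupling_mass_matrix(
    space_dh, immersed_dh,
    dealii::QGauss<dim1>(3), // same placeholder quadrature as before
    coupling_matrix, constraints);
}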

      @@ -491,16 +491,16 @@
      const ComponentMask & comps1 = ComponentMask()

      Create a coupling sparsity pattern for non-matching independent grids, using a convolution kernel with compact support of radius epsilon.

      Given two non-matching triangulations, representing the domains $\Omega^0$ and $\Omega^1$, both embedded in $\mathbb{R}^d$, and two finite element spaces $V^0(\Omega^0) = \text{span}\{v_i\}_{i=0}^n$ and $V^1(\Omega^1) = \text{span}\{w_\alpha\}_{\alpha=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

      \[
        M_{i\alpha} \dealcoloneq \int_{\Omega^0} \int_{\Omega^1}
        v_i(x) K^{\epsilon}(x-y) w_\alpha(y) dx \ dy,
        \quad i \in [0,n), \alpha \in [0,m),
      \]

      where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

      The comps0 and comps1 masks are assumed to be ordered in the same way: the first component of comps0 will couple with the first component of comps1, the second with the second, and so on. If one of the two masks has more active components than the other, then the excess components will be ignored.

      For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

      This function will also work in parallel, provided that at least one of the triangulations is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if both triangulations are of type parallel::distributed::Triangulation<dim1,spacedim>.

      @@ -575,15 +575,15 @@
      const ComponentMask & comps1 = ComponentMask()

      Create a coupling mass matrix for non-matching independent grids, using a convolution kernel with compact support.

      Given two non-matching triangulations, representing the domains $\Omega^0$ and $\Omega^1$, both embedded in $\mathbb{R}^d$, and two finite element spaces $V^0(\Omega^0) = \text{span}\{v_i\}_{i=0}^n$ and $V^1(\Omega^1) = \text{span}\{w_\alpha\}_{\alpha=0}^m$, compute the matrix

      \[
        M_{i\alpha} \dealcoloneq \int_{\Omega^0} \int_{\Omega^1}
        v_i(x) K^{\epsilon}(x-y) w_\alpha(y) dx \ dy,
        \quad i \in [0,n), \alpha \in [0,m),
      \]

      where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

      The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern() function.

      The comps0 and comps1 masks are assumed to be ordered in the same way: the first component of comps0 will couple with the first component of comps1, the second with the second, and so on. If one of the two masks has more active components than the other, then the excess components will be ignored.

      For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 2024-01-30 03:04:49.992860920 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 2024-01-30 03:04:49.992860920 +0000 @@ -279,7 +279,7 @@
      Returns the max/min bounds on the value, taken over all the entries in the incoming vector of FunctionBounds. That is, given the incoming function bounds, $[L_j, U_j]$, this function returns $[L, U]$, where $L = \min_{j} L_j$ and $U = \max_{j} U_j$.

      Definition at line 201 of file quadrature_generator.cc.

      @@ -301,21 +301,21 @@
      Finds the best choice of height function direction, given the FunctionBounds for a number of functions $\{\psi_j\}_{j=0}^{n-1}$. Here, "best" is meant in the sense of the implicit function theorem.

      Let $J_I$ be the index set of the indefinite functions:

      $J_I = \{0,..., n - 1\} \setminus \{ j : |\psi_j| > 0 \}$.

      This function converts the incoming bounds to a lower bound, $L_{jk}$, on the absolute value of each component of the gradient:

      $|\partial_k \psi_j| > L_{jk}$.

      and then returns a coordinate direction, $i$, and a lower bound $L$, such that

      \[
        i = \arg \max_{k} \min_{j \in J_I} L_{jk}, \\
        L =      \max_{k} \min_{j \in J_I} L_{jk}.
      \]

      This means $i$ is a coordinate direction such that all functions intersected by the zero contour (i.e. those belonging to $J_I$) fulfill

      $|\partial_i \psi_j| > L$.

      Note that the estimated lower bound, $L$, can be zero or negative. This means that no suitable height function direction exists. If all of the incoming functions are positive or negative definite the returned std::optional is non-set.

      Definition at line 275 of file quadrature_generator.cc.

      @@ -386,7 +386,7 @@ std::pair< double, double > & value_bounds

      Given the incoming lower and upper bounds on the value of a function $[L, U]$, return the minimum/maximum of $[L, U]$ and the function values at the vertices. That is, this function returns

      $[\min(L, L_f), \max(U, U_f)]$,

      where $L_f = \min_{v} f(x_v)$, $U_f = \max_{v} f(x_v)$, and $x_v$ is a vertex.

      It is assumed that the incoming function is scalar valued.

      @@ -474,7 +474,7 @@
      Return a lower bound, $L_a$, on the absolute value of a function, $f(x)$:

      $L_a \leq |f(x)|$,

      by estimating it from the incoming lower and upper bounds: $L \leq f(x) \leq U$.

      By rewriting the lower and upper bounds as $F - C \leq f(x) \leq F + C$, where $L = F - C$, $U = F + C$ (or $F = (U + L)/2$, $C = (U - L)/2$), we get $|f(x) - F| \leq C$. Using the inverse triangle inequality gives $|F| - |f(x)| \leq |f(x) - F| \leq C$. Thus, $L_a = |F| - C$.
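
      The estimate is small enough to fit in a few lines (a sketch; the bound is only informative when positive):

#include <cmath>

// Lower bound on |f| from bounds L <= f <= U: write f in [F-C, F+C] with
// F = (U+L)/2 and C = (U-L)/2; the inverse triangle inequality then gives
// |f| >= |F| - C.
inline double lower_bound_on_abs(const double L, const double U)
{
  const double F = (U + L) / 2.;
  const double C = (U - L) / 2.;
  return std::abs(F) - C;
}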

      @@ -663,7 +663,7 @@ QPartitioning< dim > & q_partitioning

      Let $\{ y_0, ..., y_{n+1} \}$ be such that $[y_0, y_{n+1}]$ is the interval and $\{ y_1, ..., y_n \}$ are the roots. In each subinterval, $[y_i, y_{i+1}]$, distribute points according to the 1D-quadrature rule $\{(x_q, w_q)\}_q$ (quadrature1D). Take the tensor product with the quadrature point $(x, w)$ (point, weight) to create dim-dimensional quadrature points

      \[
        X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q),
        W_q = w_I (y_{i+1} - y_i) w_q,
      \]

      @@ -748,7 +748,7 @@ const std_cxx17::optional< HeightDirectionData > & height_direction_data

      Return the coordinate direction that the box should be split in, assuming that the box should be split in half.

      If the box is larger in one coordinate direction, this direction is returned. If the box has the same extent in all directions, we choose the coordinate direction which is closest to being a height-function direction. That is, the direction $i$ that has the least negative estimate of $|\partial_i \psi_j|$. As a last resort, we choose the direction 0, if height_direction_data is non-set.

      Definition at line 995 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html 2024-01-30 03:04:50.024861187 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html 2024-01-30 03:04:50.028861221 +0000 @@ -558,7 +558,7 @@ const Mapping< 2, spacedim > & mapping = StaticMappingQ1<2, spacedim>::mapping

      Given a Triangulation and an optional Mapping, create a vector of smooth curves that interpolate the connected parts of the boundary vertices of the Triangulation and return them as a vector of TopoDS_Edge objects.

      This function constructs closed Bspline curve objects passing through all vertices of the boundary of the triangulation, with $C^2$ continuity on each vertex except the first, where only $C^1$ continuity is guaranteed.

      The returned curves are ordered with respect to the indices of the faces that make up the triangulation boundary, i.e., the first curve is the one extracted starting from the face with the lowest index, and so on.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 2024-01-30 03:04:50.048861387 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 2024-01-30 03:04:50.048861387 +0000 @@ -141,21 +141,21 @@
      const ComponentMask & space_comps = ComponentMask()

      Create an interpolation sparsity pattern for particles.

      Given a triangulation representing the domain $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the sparsity pattern that would be necessary to assemble the matrix

      \[
        M_{i,j} \dealcoloneq v_j(x_i) ,
      \]

      where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

      In the case of vector valued finite element spaces, the components on which interpolation must be performed can be selected using a component mask. Only primitive finite element spaces are supported.

      When selecting more than one component, the resulting sparsity will have dimension equal to particle_handler.n_global_particles() * mask.n_selected_components() times space_dh.n_dofs(), and the corresponding matrix entries are given by

      \[
        M_{(i*n_comps+k),j} \dealcoloneq v_j(x_i) \cdot e_{comp_j},
      \]

      where comp_j is the only non zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

      The sparsity is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

      If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the sparsity will be empty.

      Constraints of the form supported by the AffineConstraints class may be supplied with the constraints argument. The method AffineConstraints::add_entries_local_to_global() is used to fill the final sparsity pattern.

      Definition at line 32 of file utilities.cc.
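
      A minimal usage sketch (placeholder names; the sparsity is sized as described above):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/particles/particle_handler.h>
#include <deal.II/particles/utilities.h>

// Build the particle-to-field interpolation sparsity; rows correspond to
// particles, columns to field degrees of freedom.
template <int dim, int spacedim>
void setup_particle_interpolation(
  const dealii::DoFHandler<dim, spacedim>                 &space_dh,
  const dealii::Particles::ParticleHandler<dim, spacedim> &particle_handler,
  const dealii::AffineConstraints<double>                 &constraints,
  dealii::DynamicSparsityPattern                          &dsp)
{
  dsp.reinit(particle_handler.n_global_particles(), space_dh.n_dofs());
  dealii::Particles::Utilities::create_interpolation_sparsity_pattern(
    space_dh, particle_handler, dsp, constraints);
}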

      @@ -192,21 +192,21 @@
      const ComponentMask & space_comps = ComponentMask()

      Create an interpolation matrix for particles.

      Given a triangulation representing the domain $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the matrix

      \[
        M_{ij} \dealcoloneq v_j(x_i) ,
      \]

      where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

      In the case of vector valued finite element spaces, the components on which interpolation must be performed can be selected using a component mask. Only primitive finite element spaces are supported.

      When selecting more than one component, the resulting sparsity will have dimension equal to particle_handler.n_global_particles() * mask.n_selected_components() times space_dh.n_dofs(), and the corresponding matrix entries are given by

      \[
        M_{(i*n_comps+k),j} \dealcoloneq v_j(x_i) \cdot e_{comp_j},
      \]

      where comp_j is the only non zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

      The matrix is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

      If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the matrix will be zero.

      Constraints of the form supported by the AffineConstraints class may be supplied with the constraints argument. The method AffineConstraints::distribute_local_to_global() is used to distribute the entries of the matrix to respect the given constraints.

      Definition at line 114 of file utilities.cc.
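
Continuing the sketch above (again hedged: field_dofs, the solution vector of the field to be interpolated, is an assumed name), the matrix can then be built and applied:

SparseMatrix<double> M(sparsity); // sparsity from the sketch above
Particles::Utilities::create_interpolation_matrix(
  space_dh, particle_handler, M, constraints);

// Since M_ij = v_j(x_i), the product M * field_dofs evaluates the
// finite element field at the position of every particle.
Vector<double> values_at_particles(particle_handler.n_global_particles());
M.vmult(values_at_particles, field_dofs);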

/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html

where $n$ denotes the Kelvin index for the tensor component, while for a general rank-2 tensor $\mathbf{T}$

\[
 \mathbf{T} \dealcoloneq \left[ \begin{array}{ccc}
  T_{00} & T_{01} & T_{02} \\
  T_{10} & T_{11} & T_{12} \\
  T_{20} & T_{21} & T_{22}
 \end{array} \right] .
\]

To illustrate the purpose of this notation, consider the rank-2 symmetric tensors $\mathbf{S}$ and $\mathbf{E}$ that are related to one another by $\mathbf{S} = \cal{C} : \mathbf{E}$, where the operator $\cal{C}$ is a fourth-order symmetric tensor. As opposed to the commonly used Voigt notation, Kelvin (or Mandel) notation keeps the same definition of the inner product $\mathbf{S} : \mathbf{E}$ when both $\mathbf{S}$ and $\mathbf{E}$ are symmetric. In general, the inner product of all symmetric and general tensors remains the same regardless of the notation with which it is represented.

      To achieve these two properties, namely that

\[
 \mathbf{S} = \cal{C} : \mathbf{E}
 \quad \Rightarrow   \quad
 \tilde{\mathbf{S}} = \tilde{\cal{C}} \; \tilde{\mathbf{E}}
\]

and

\[
 \mathbf{S} : \mathbf{E}
 \, \equiv \,
 \tilde{\mathbf{S}} \cdot \tilde{\mathbf{E}} ,
\]

it holds that the Kelvin-condensed equivalents of the previously defined symmetric tensors, indicated by the $\tilde{\left(\bullet\right)}$, must be defined as

\[
 \tilde{\mathbf{S}}
   = \left[ \begin{array}{c}
   S_{00} \\ S_{11} \\ S_{22} \\ \sqrt{2} S_{12} \\ \sqrt{2} S_{02} \\
   \sqrt{2} S_{01} \end{array}\right]
 \quad \text{and} \quad
 \tilde{\mathbf{E}}
   = \left[ \begin{array}{c}
   E_{00} \\ E_{11} \\ E_{22} \\ \sqrt{2} E_{12} \\ \sqrt{2} E_{02} \\
   \sqrt{2} E_{01} \end{array}\right] .
\]

      The corresponding and consistent condensed fourth-order symmetric tensor is

\[
 \tilde{\cal{C}}
   = \left[ \begin{array}{cccccc}
   {\cal{C}}_{0000}          & {\cal{C}}_{0011}          & {\cal{C}}_{0022}          &
   \sqrt{2} {\cal{C}}_{0012} & \sqrt{2} {\cal{C}}_{0002} & \sqrt{2} {\cal{C}}_{0001} \\
   {\cal{C}}_{1100}          & {\cal{C}}_{1111}          & {\cal{C}}_{1122}          &
   \sqrt{2} {\cal{C}}_{1112} & \sqrt{2} {\cal{C}}_{1102} & \sqrt{2} {\cal{C}}_{1101} \\
   {\cal{C}}_{2200}          & {\cal{C}}_{2211}          & {\cal{C}}_{2222}          &
   \sqrt{2} {\cal{C}}_{2212} & \sqrt{2} {\cal{C}}_{2202} & \sqrt{2} {\cal{C}}_{2201} \\
   \sqrt{2} {\cal{C}}_{1200} & \sqrt{2} {\cal{C}}_{1211} & \sqrt{2} {\cal{C}}_{1222} &
   2 {\cal{C}}_{1212}        & 2 {\cal{C}}_{1202}        & 2 {\cal{C}}_{1201}        \\
   \sqrt{2} {\cal{C}}_{0200} & \sqrt{2} {\cal{C}}_{0211} & \sqrt{2} {\cal{C}}_{0222} &
   2 {\cal{C}}_{0212}        & 2 {\cal{C}}_{0202}        & 2 {\cal{C}}_{0201}        \\
   \sqrt{2} {\cal{C}}_{0100} & \sqrt{2} {\cal{C}}_{0111} & \sqrt{2} {\cal{C}}_{0122} &
   2 {\cal{C}}_{0112}        & 2 {\cal{C}}_{0102}        & 2 {\cal{C}}_{0101}
   \end{array}\right] .
\]

The mapping from the two Kelvin indices of the FullMatrix $\tilde{\cal{C}}$ to the rank-4 SymmetricTensor $\cal{C}$ can be inferred using the table shown above.

An important observation is that both the left-hand side tensor $\tilde{\mathbf{S}}$ and right-hand side tensor $\tilde{\mathbf{E}}$ have the same form; this is a property that is not present in Voigt notation. The various factors introduced into $\tilde{\mathbf{S}}$, $\tilde{\mathbf{E}}$ and $\tilde{\cal{C}}$ account for the symmetry of the tensors. The Kelvin description of their non-symmetric counterparts includes no such factors.
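
A hedged sketch of how these conversions might look in code (it assumes the Kelvin::to_matrix/to_vector helpers documented below, and that a rank-4 SymmetricTensor C and a rank-2 SymmetricTensor E are already in scope):

using namespace dealii;
using namespace Physics::Notation;

// Condense the symmetric tensors into Kelvin form (6x6 and 6x1 in 3-d).
const FullMatrix<double> C_tilde = Kelvin::to_matrix(C);
const Vector<double>     E_tilde = Kelvin::to_vector(E);

// S_tilde = C_tilde * E_tilde reproduces S = C : E ...
Vector<double> S_tilde(C_tilde.m());
C_tilde.vmult(S_tilde, E_tilde);

// ... and the inner product is preserved: S : E == S_tilde . E_tilde.
const double energy = S_tilde * E_tilde;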

      Some useful references that show how this notation works include, amongst others,

      @article{Nagel2016,
      author = {Nagel, T. and G{\"o}rke, U-J. and Moerman, K. and Kolditz,
      O.},

      Convert a rank-1 tensor to its compressed vector equivalent.

The output vector has $dim$ entries.

      Convert a rank-1 tensor to its compressed matrix equivalent.

The output matrix will have $dim$ rows and one column.

      Convert a rank-2 tensor to its compressed matrix equivalent.

The output matrix will have $dim$ rows and $dim$ columns.

      Convert a rank-2 symmetric tensor to its compressed matrix equivalent.

The output matrix will have $dim$ rows and $dim$ columns, with the same format as the equivalent function for non-symmetric tensors. This is because it is not possible to compress the SymmetricTensor<2,dim>::n_independent_components unique entries into a square matrix.

the matrix mtrx_1 will have $dim \times dim$ rows and $dim$ columns (i.e. size Tensor<2,dim>::n_independent_components $\times$ Tensor<1,dim>::n_independent_components), while those of the matrix mtrx_2 will have $dim$ rows and $(dim \times dim + dim)/2$ columns (i.e. size Tensor<1,dim>::n_independent_components $\times$ SymmetricTensor<2,dim>::n_independent_components), as it is assumed that the entries corresponding to the alternation of the second and third indices are equal. That is to say that r3_symm_tnsr[i][j][k] == r3_symm_tnsr[i][k][j].

/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html

      Detailed Description

      A collection of operations to assist in the transformation of tensor quantities from the reference to spatial configuration, and vice versa. These types of transformation are typically used to re-express quantities measured or computed in one configuration in terms of a second configuration.

      Notation

We will use the same notation for the coordinates $\mathbf{X}, \mathbf{x}$, transformations $\varphi$, differential operator $\nabla_{0}$ and deformation gradient $\mathbf{F}$ as discussed for namespace Physics::Elasticity.

As a further point on notation, we will follow Holzapfel (2007) and denote the push forward transformation as $\chi\left(\bullet\right)$ and the pull back transformation as $\chi^{-1}\left(\bullet\right)$. We will also use the annotation $\left(\bullet\right)^{\sharp}$ to indicate that a tensor $\left(\bullet\right)$ is a contravariant tensor, and $\left(\bullet\right)^{\flat}$ that it is covariant. In other words, these indices do not actually change the tensor; they just indicate the kind of object a particular tensor is.

      Note
      For these transformations, unless otherwise stated, we will strictly assume that all indices of the transformed tensors derive from one coordinate system; that is to say that they are not multi-point tensors (such as the Piola stress in elasticity).

      Function Documentation

Return the result of applying Nanson's formula for the transformation of the material surface area element $d\mathbf{A}$ to the current surface area element $d\mathbf{a}$ under the nonlinear transformation map $\mathbf{x} = \boldsymbol{\varphi} \left( \mathbf{X} \right)$.

      The returned result is the spatial normal scaled by the ratio of areas between the reference and spatial surface elements, i.e.

\[
  \mathbf{n} \frac{da}{dA}
  \dealcoloneq \textrm{det} \mathbf{F} \, \mathbf{F}^{-T} \cdot \mathbf{N}
  = \textrm{cof} \mathbf{F} \cdot \mathbf{N} \, .
\]

Parameters
    [in]  N    The referential normal unit vector $\mathbf{N}$
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    The scaled spatial normal vector $\mathbf{n} \frac{da}{dA}$
      Note
      For a discussion of the background of this function, see G. A. Holzapfel: "Nonlinear solid mechanics. A Continuum Approach for Engineering" (2007), and in particular formula (2.55) on p. 75 (or thereabouts).
      For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.11) on p. 23 (or thereabouts).
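
A hedged sketch (N and F are assumed to be available, e.g. from FEValues data at a boundary quadrature point; the function name is the one this entry documents):

// n da/dA = cof(F) . N : spatial normal weighted by the area ratio.
const Tensor<1, 3> n_da_dA =
  Physics::Transformations::nansons_formula(N, F);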

      Return a vector with a changed basis, i.e.

\[
  \mathbf{V}^{\prime} \dealcoloneq \mathbf{B} \cdot \mathbf{V}
\]

Parameters
    [in]  V    The vector to be transformed $\mathbf{V}$
    [in]  B    The transformation matrix $\mathbf{B}$

Returns
    $\mathbf{V}^{\prime}$

      Return a rank-2 tensor with a changed basis, i.e.

\[
  \mathbf{T}^{\prime} \dealcoloneq \mathbf{B} \cdot \mathbf{T} \cdot
  \mathbf{B}^{T}
\]

Parameters
    [in]  T    The tensor to be transformed $\mathbf{T}$
    [in]  B    The transformation matrix $\mathbf{B}$

Returns
    $\mathbf{T}^{\prime}$
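
A hedged sketch of how these basis changes might be invoked (assuming basis_transformation is the function documented here, and that V, T and an orthogonal matrix B are already in scope):

using namespace dealii;

// Rank-1: V' = B . V
const Tensor<1, 3> V_prime =
  Physics::Transformations::basis_transformation(V, B);

// Rank-2: T' = B . T . B^T
const Tensor<2, 3> T_prime =
  Physics::Transformations::basis_transformation(T, B);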

      Return a symmetric rank-2 tensor with a changed basis, i.e.

\[
  \mathbf{T}^{\prime} \dealcoloneq \mathbf{B} \cdot \mathbf{T} \cdot
  \mathbf{B}^{T}
\]

Parameters
    [in]  T    The tensor to be transformed $\mathbf{T}$
    [in]  B    The transformation matrix $\mathbf{B}$

Returns
    $\mathbf{T}^{\prime}$

      Return a rank-4 tensor with a changed basis, i.e. (in index notation):

\[
  H_{ijkl}^{\prime} \dealcoloneq B_{iI} B_{jJ} H_{IJKL} B_{kK} B_{lL}
\]

Parameters
    [in]  H    The tensor to be transformed $\mathbf{H}$
    [in]  B    The transformation matrix $\mathbf{B}$

Returns
    $\mathbf{H}^{\prime}$

      Return a symmetric rank-4 tensor with a changed basis, i.e. (in index notation):

\[
  H_{ijkl}^{\prime} \dealcoloneq B_{iI} B_{jJ} H_{IJKL} B_{kK} B_{lL}
\]

Parameters
    [in]  H    The tensor to be transformed $\mathbf{H}$
    [in]  B    The transformation matrix $\mathbf{B}$

Returns
    $\mathbf{H}^{\prime}$

/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html

      Detailed Description

Transformation of tensors that are defined in terms of a set of contravariant bases. Rank-1 and rank-2 contravariant tensors $\left(\bullet\right)^{\sharp} = \mathbf{T}$ (and its spatial counterpart $\mathbf{t}$) typically satisfy the relation

\[
   \int_{V_{0}} \nabla_{0} \cdot \mathbf{T} \; dV
     = \int_{\partial V_{0}} \mathbf{T} \cdot \mathbf{N} \; dA
     = \int_{\partial V_{t}} \mathbf{T} \cdot \mathbf{n} \; da
     = \int_{V_{t}} \nabla \cdot \mathbf{t} \; dv
\]

where $V_{0}$ and $V_{t}$ are respectively control volumes in the reference and spatial configurations, and their surfaces $\partial V_{0}$ and $\partial V_{t}$ have the outwards facing normals $\mathbf{N}$ and $\mathbf{n}$.

      Function Documentation

      ◆ push_forward() [1/5]

      Return the result of the push forward transformation on a contravariant vector, i.e.

\[
  \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F} \cdot \left(\bullet\right)^{\sharp}
\]

Parameters
    [in]  V    The (referential) vector to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\chi\left( \mathbf{V} \right)$
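
A hedged sketch (F is assumed to be the deformation gradient at a quadrature point; push_forward and pull_back are the names documented in this namespace):

using namespace dealii;
namespace Contra = Physics::Transformations::Contravariant;

// Push a referential vector and tensor to the spatial configuration ...
const Tensor<1, 3> v = Contra::push_forward(V, F); // F . V
const Tensor<2, 3> t = Contra::push_forward(T, F); // F . T . F^T

// ... and recover the referential quantities by the inverse map.
const Tensor<1, 3> V_back = Contra::pull_back(v, F); // F^{-1} . v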

      Return the result of the push forward transformation on a rank-2 contravariant tensor, i.e.

\[
  \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F} \cdot \left(\bullet\right)^{\sharp} \cdot
  \mathbf{F}^{T}
\]

Parameters
    [in]  T    The (referential) rank-2 tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\chi\left( \mathbf{T} \right)$

      Return the result of the push forward transformation on a rank-2 contravariant symmetric tensor, i.e.

\[
  \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F} \cdot \left(\bullet\right)^{\sharp} \cdot
  \mathbf{F}^{T}
\]

Parameters
    [in]  T    The (referential) rank-2 symmetric tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\chi\left( \mathbf{T} \right)$

      Return the result of the push forward transformation on a rank-4 contravariant tensor, i.e. (in index notation):

\[
  \left[ \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq F_{iI} F_{jJ}
    \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
\]

Parameters
    [in]  H    The (referential) rank-4 tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\chi\left( \mathbf{H} \right)$

      Return the result of the push forward transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

\[
  \left[ \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq F_{iI} F_{jJ}
    \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
\]

Parameters
    [in]  H    The (referential) rank-4 symmetric tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\chi\left( \mathbf{H} \right)$

      Return the result of the pull back transformation on a contravariant vector, i.e.

\[
  \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F}^{-1} \cdot \left(\bullet\right)^{\sharp}
\]

Parameters
    [in]  v    The (spatial) vector to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\chi^{-1}\left( \mathbf{v} \right)$

      Return the result of the pull back transformation on a rank-2 contravariant tensor, i.e.

\[
  \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \mathbf{F}^{-1} \cdot \left(\bullet\right)^{\sharp}
    \cdot \mathbf{F}^{-T}
\]

Parameters
    [in]  t    The (spatial) tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html

      Detailed Description

Transformation of tensors that are defined in terms of a set of covariant basis vectors. Rank-1 and rank-2 covariant tensors $\left(\bullet\right)^{\flat} = \mathbf{T}$ (and its spatial counterpart $\mathbf{t}$) typically satisfy the relation

\[
   \int_{\partial V_{0}} \left[ \nabla_{0} \times \mathbf{T} \right]
     \cdot \mathbf{N} \; dA
     = \oint_{\partial A_{0}} \mathbf{T} \cdot \mathbf{L} \; dL
     = \oint_{\partial A_{t}} \mathbf{t} \cdot \mathbf{l} \; dl
     = \int_{\partial V_{t}} \left[ \nabla \times \mathbf{t} \right]
     \cdot \mathbf{n} \; da
\]

where the control surfaces $\partial V_{0}$ and $\partial V_{t}$ with outwards facing normals $\mathbf{N}$ and $\mathbf{n}$ are bounded by the curves $\partial A_{0}$ and $\partial A_{t}$ that are, respectively, associated with the line directors $\mathbf{L}$ and $\mathbf{l}$.

      Function Documentation

      ◆ push_forward() [1/5]

      Return the result of the push forward transformation on a covariant vector, i.e.

\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
\]

Parameters
    [in]  V    The (referential) vector to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\chi\left( \mathbf{V} \right)$
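
A hedged counterpart to the contravariant sketch above (same assumptions about V, T and F being in scope):

namespace Co = dealii::Physics::Transformations::Covariant;

// Covariant quantities transform with the inverse-transpose of F.
const Tensor<1, 3> v = Co::push_forward(V, F);    // F^{-T} . V
const Tensor<2, 3> t = Co::push_forward(T, F);    // F^{-T} . T . F^{-1}
const Tensor<2, 3> T_back = Co::pull_back(t, F);  // F^T . t . F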

      Return the result of the push forward transformation on a rank-2 covariant tensor, i.e.

\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}^{-1}
\]

Parameters
    [in]  T    The (referential) rank-2 tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\chi\left( \mathbf{T} \right)$

      Return the result of the push forward transformation on a rank-2 covariant symmetric tensor, i.e.

\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}^{-1}
\]

Parameters
    [in]  T    The (referential) rank-2 symmetric tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\chi\left( \mathbf{T} \right)$

      Return the result of the push forward transformation on a rank-4 covariant tensor, i.e. (in index notation):

\[
  \left[ \chi\left(\bullet\right)^{\flat} \right]_{ijkl}
    \dealcoloneq F^{-T}_{iI} F^{-T}_{jJ}
    \left(\bullet\right)^{\flat}_{IJKL} F^{-T}_{kK} F^{-T}_{lL}
\]

Parameters
    [in]  H    The (referential) rank-4 tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\chi\left( \mathbf{H} \right)$

      Return the result of the push forward transformation on a rank-4 covariant symmetric tensor, i.e. (in index notation):

\[
  \left[ \chi\left(\bullet\right)^{\flat} \right]_{ijkl}
    \dealcoloneq F^{-T}_{iI} F^{-T}_{jJ}
    \left(\bullet\right)^{\flat}_{IJKL} F^{-T}_{kK} F^{-T}_{lL}
\]

Parameters
    [in]  H    The (referential) rank-4 symmetric tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\chi\left( \mathbf{H} \right)$

      Return the result of the pull back transformation on a covariant vector, i.e.

\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat}
\]

Parameters
    [in]  v    The (spatial) vector to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\chi^{-1}\left( \mathbf{v} \right)$

      Return the result of the pull back transformation on a rank-2 covariant tensor, i.e.

\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat} \cdot
  \mathbf{F}
\]

Parameters
    [in]  t    The (spatial) tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html

      Return the result of the push forward transformation on a contravariant vector, i.e.

\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
  \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
  \left(\bullet\right)^{\sharp}
\]

Parameters
    [in]  V    The (referential) vector to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{V} \right)$
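
A hedged sketch (the Piola maps add the $1/\textrm{det}\mathbf{F}$ volume scaling to the contravariant transformation; sigma, a spatial rank-2 symmetric tensor such as the Cauchy stress, and F are assumed in scope):

namespace Piola = dealii::Physics::Transformations::Piola;

// Pull the Cauchy stress sigma back to the second Piola-Kirchhoff
// stress: S = det(F) F^{-1} . sigma . F^{-T}.
const SymmetricTensor<2, 3> S = Piola::pull_back(sigma, F);

// The push forward inverts this: sigma = (1/det F) F . S . F^T.
const SymmetricTensor<2, 3> sigma_again = Piola::push_forward(S, F);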

      Return the result of the push forward transformation on a rank-2 contravariant tensor, i.e.

\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
  \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{T}
\]

Parameters
    [in]  T    The (referential) rank-2 tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{T} \right)$

      Return the result of the push forward transformation on a rank-2 contravariant symmetric tensor, i.e.

\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
  \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{T}
\]

Parameters
    [in]  T    The (referential) rank-2 symmetric tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{T} \right)$

      Return the result of the push forward transformation on a rank-4 contravariant tensor, i.e. (in index notation):

\[
  \textrm{det} \mathbf{F}^{-1} \; \left[
  \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; F_{iI} F_{jJ}
  \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
\]

Parameters
    [in]  H    The (referential) rank-4 tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{H} \right)$

      Return the result of the push forward transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

\[
  \textrm{det} \mathbf{F}^{-1} \; \left[
  \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; F_{iI} F_{jJ}
  \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
\]

Parameters
    [in]  H    The (referential) rank-4 symmetric tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{H} \right)$

      Return the result of the pull back transformation on a contravariant vector, i.e.

\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
  \left(\bullet\right)^{\sharp}
\]

Parameters
    [in]  v    The (spatial) vector to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

Returns
    $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{v} \right)$

      Return the result of the pull back transformation on a rank-2 contravariant tensor, i.e.

\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
  \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{-T}
\]

Parameters
    [in]  t    The (spatial) tensor to be operated on
    [in]  F    The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Rotations.html

      Return the rotation matrix for 2-d Euclidean space, namely

\[
  \mathbf{R} \dealcoloneq \left[ \begin{array}{cc}
  \cos(\theta) & -\sin(\theta) \\
  \sin(\theta) & \cos(\theta)
  \end{array}\right]
\]

where $\theta$ is the rotation angle given in radians. In particular, this describes the counter-clockwise rotation of a vector relative to a fixed set of right-handed axes.

Parameters
    [in]  angle    The rotation angle (about the z-axis) in radians

Return the rotation matrix for 3-d Euclidean space. Most concisely stated using Rodrigues' rotation formula, this function returns the equivalent of

\[
  \mathbf{R} \dealcoloneq \cos(\theta)\mathbf{I} + \sin(\theta)\mathbf{W}
              + (1-\cos(\theta))\mathbf{u}\otimes\mathbf{u}
\]

where $\mathbf{u}$ is the axis of rotation (an axial vector) and $\theta$ is the rotation angle given in radians, $\mathbf{I}$ is the identity tensor and $\mathbf{W}$ is the skew-symmetric tensor of $\mathbf{u}$.

      Note
For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (9.194) on p. 374 (or thereabouts). This presents Rodrigues' rotation formula, but the implementation used in this function is described in this wikipedia link. In particular, this describes the counter-clockwise rotation of a vector in a plane with its normal defined by the axis of rotation. An alternative implementation is discussed at this link, but is inconsistent (sign-wise) with Rodrigues' rotation formula as it describes the rotation of a coordinate system.
Return the rotation matrix for 3-d Euclidean space. Most concisely stated using Rodrigues' rotation formula, this function returns the equivalent of

\[
  \mathbf{R} \dealcoloneq \cos(\theta)\mathbf{I} + \sin(\theta)\mathbf{W}
              + (1-\cos(\theta))\mathbf{u}\otimes\mathbf{u}
\]

where $\mathbf{u}$ is the axis of rotation (an axial vector) and $\theta$ is the rotation angle given in radians, $\mathbf{I}$ is the identity tensor and $\mathbf{W}$ is the skew-symmetric tensor of $\mathbf{u}$.

      Note
For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (9.194) on p. 374 (or thereabouts). This presents Rodrigues' rotation formula, but the implementation used in this function is described in this wikipedia link. In particular, this describes the counter-clockwise rotation of a vector in a plane with its normal defined by the axis of rotation. An alternative implementation is discussed at this link, but is inconsistent (sign-wise) with Rodrigues' rotation formula as it describes the rotation of a coordinate system.
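
A hedged sketch of both helpers (assuming the rotation_matrix_2d/rotation_matrix_3d names documented in this section, with the 3-d overload taking an axis and an angle):

using namespace dealii;
namespace Rot = Physics::Transformations::Rotations;

// 2-d: counter-clockwise rotation by 30 degrees.
const Tensor<2, 2> R2 = Rot::rotation_matrix_2d(numbers::PI / 6.0);

// 3-d: rotation by the same angle about the z-axis.
Tensor<1, 3> axis;
axis[2] = 1.0; // unit vector e_z
const Tensor<2, 3> R3 = Rot::rotation_matrix_3d(axis, numbers::PI / 6.0);
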
/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html

Calculate the angle $\theta$ between two vectors a and b. The returned angle will be in the range $[0, \pi]$.

      This function uses the geometric definition of the scalar product.

\[
   \vec{a} \cdot \vec{b} = \|\vec{a}\| \|\vec{b}\| \cos(\theta)
\]

Calculate the angle $\theta$ between two vectors a and b, where both vectors are located in a plane described by a normal vector axis.

The angle computed by this function corresponds to the rotation angle that would transform the vector a into the vector b around the vector axis. Thus, contrary to the function above, we get a signed angle which will be in the range $[-\pi, \pi]$.

      The vector axis needs to be a unit vector and be perpendicular to both vectors a and b.

      This function uses the geometric definitions of both the scalar and cross product.

\begin{align*}
   \vec{a} \cdot  \vec{b} &= \|\vec{a}\| \|\vec{b}\| \cos(\theta) \\
   \vec{a} \times \vec{b} &= \|\vec{a}\| \|\vec{b}\| \sin(\theta) \vec{n}
\end{align*}

We can form the tangent of the angle using both products.

\[
   \tan{\theta}
   = \frac{\sin(\theta)}{\cos(\theta)}
   = \frac{(\vec{a} \times \vec{b}) \cdot \vec{n}}{\vec{a} \cdot \vec{b}}
\]

      Note
      Only applicable for three-dimensional vectors spacedim == 3.
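
A hedged sketch (assuming the angle/signed_angle function names this page documents):

using namespace dealii;

Tensor<1, 3> a, b, axis;
a[0]    = 1.0; // e_x
b[1]    = 1.0; // e_y
axis[2] = 1.0; // e_z, unit normal of the common plane

// Unsigned angle in [0, pi]: here pi/2.
const double theta = Physics::VectorRelations::angle(a, b);

// Signed angle in [-pi, pi] about the axis: +pi/2 for this orientation.
const double theta_s = Physics::VectorRelations::signed_angle(a, b, axis);
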
/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html

      Detailed Description

      Base namespace for solver classes using the SLEPc solvers which are selected based on flags passed to the eigenvalue problem solver context. Derived classes set the right flags to set the right solver.

The SLEPc solvers are intended to be used for solving the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$, where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively. The emphasis is on methods and techniques appropriate for problems in which the associated matrices are sparse. Most of the methods offered by the SLEPc library are projection methods or other methods with similar properties, and wrappers are provided to interface to SLEPc solvers that handle both of these problem sets.

      SLEPcWrappers can be implemented in application codes in the following way:

SolverControl solver_control (1000, 1e-9);         // at most 1000 iterations, tolerance 1e-9
SolverArnoldi system (solver_control, mpi_communicator);
system.solve (A, B, lambda, x, size_of_spectrum);  // solve A x = lambda B x
for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable const unsigned int size_of_spectrum tells SLEPc the number of eigenvector/eigenvalue pairs to solve for. Additional options and solver parameters can be passed to the SLEPc solvers before calling solve(). For example, if the matrices of the general eigenspectrum problem are not hermitian and only the lower eigenvalues are wanted, the following code can be implemented before calling solve():

system.set_problem_type (EPS_NHEP);
      system.set_which_eigenpairs (EPS_SMALLEST_REAL);

      These options can also be set at the command line.

      See also step-36 for a hands-on example.

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html

Type of function objects to interface with SUNDIALS' linear solvers

This function type encapsulates the action of solving $P^{-1}Ax=P^{-1}b$. The LinearOperator op encapsulates the matrix vector product $Ax$ and the LinearOperator prec encapsulates the application of the preconditioner $P^{-1}z$. The user can specify function objects of this type to attach custom linear solver routines to SUNDIALS. The two LinearOperators op and prec are built internally by SUNDIALS based on user settings. The parameters are interpreted as follows:

Parameters
    [in]  op    A LinearOperator that applies the matrix vector product

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html

      Detailed Description

      Smoothness estimation strategy based on the decay of Fourier expansion coefficients.

From the definition, we can write our Fourier series expansion $a_{\bf k}$ of the finite element solution on cell $K$ with polynomial degree $p$ as a matrix product

\begin{eqnarray*}
   u_h({\bf x}) &=& \sum_j u_j \varphi_j ({\bf x}) \\
   u_{h, {\bf k}}({\bf x}) &=&
     \sum_{{\bf k}, \|{\bf k}\|\le p} a_{\bf k} \phi_{\bf k}({\bf x}),
     \quad a_{\bf k} = \sum_j {\cal F}_{{\bf k},j} u_j
\end{eqnarray*}

with $u_j$ the degrees of freedom and $\varphi_j$ the corresponding shape functions. $\{\phi_{\bf k}({\bf x}) = \exp(i \, 2 \pi \, {\bf k} \cdot {\bf x}) \}$ are exponential functions on cell $K$. $a_{\bf k}$ and ${\cal F}_{{\bf k},j}$ are coefficients and transformation matrices from the Fourier expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if the mapping from the reference cell to the actual cell is linear. We use the class FESeries::Fourier to determine all coefficients $a_{\bf k}$.

If the finite element approximation on cell $K$ is part of the Hilbert space $H^s(K)$, then the following integral must exist for both the finite element and spectral representation of our solution

\begin{eqnarray*}
   \| \nabla^s u_h({\bf x}) \|_{L^2(K)}^2 &=&
     \int\limits_K \left| \nabla^s u_h({\bf x}) \right|^2 d{\bf x} <
     \infty \\
   \| \nabla^s u_{h, {\bf k}}({\bf x}) \|_{L^2(K)}^2 &=&
     \int\limits_K \left| \sum\limits_{\bf k} (2 \pi i \, {\bf k})^s \,
     a_{\bf k} \, \phi_{\bf k}({\bf x}) \right|^2 d{\bf x} =
     (2 \pi)^{2s} \sum\limits_{\bf k} \left| a_{\bf k} \right|^2
     \|{\bf k}\|_2^{2s} < \infty
\end{eqnarray*}

      The sum is finite only if the summands decay at least with order

\[
   |a_{\bf k}|^2 \|{\bf k}\|_2^{2s} \|{\bf k}\|_2^{d - 1} =
     {\cal O}\left( \|{\bf k}\|_2^{-1-\epsilon} \right)
\]

for all $\epsilon > 0$. The additional factor stems from the fact that, since we sum over all multi-indices ${\bf k}$ that are located on a dim-dimensional sphere, we actually have, up to a constant, $\|{\bf k}\|_2^{d-1}$ modes located in each increment $\|{\bf k}\|_2 + d\|{\bf k}\|_2$ that need to be taken into account. By a comparison of exponents, we can rewrite this condition as

\[
   |a_{\bf k}| = {\cal O}\left(\|{\bf k}\|_2^
     {-\left(s + \frac d2 + \epsilon \right)} \right)
\]

The next step is to estimate how fast these coefficients decay with $\|{\bf k}\|_2$. Thus, we perform a least-squares fit

\[
    \min_{\alpha,\sigma}
    \frac 12 \sum_{{\bf k}, \|{\bf k}\|_2 \le p}
    \left( |a_{\bf k}| - \alpha \|{\bf k}\|_2^{-\sigma}\right)^2
\]

with regression coefficients $\alpha$ and $\sigma$. For simplification, we apply a logarithm on our minimization problem

\[
    \min_{\beta,\sigma}
    Q(\beta,\sigma) =
    \frac 12 \sum_{{\bf k}, \|{\bf k}\|_2 \le p}
    \left( \ln |a_{\bf k}| - \beta + \sigma \ln \|{\bf k}\|_2
  \right)^2,
\]

where $\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0, \frac{\partial Q}{\partial\sigma}=0$, are linear in $\beta,\sigma$. We can write these conditions as follows:

\[
   \left(\begin{array}{cc}
   \sum_{{\bf k}, \|{\bf k}\|_2 \le p} 1 &
   \sum_{{\bf k}, \|{\bf k}\|_2 \le p} \ln \|{\bf k}\|_2
   \\
   \sum_{{\bf k}, \|{\bf k}\|_2 \le p} \ln \|{\bf k}\|_2 &
   \sum_{{\bf k}, \|{\bf k}\|_2 \le p} (\ln \|{\bf k}\|_2)^2
   \end{array}\right)
   \left(\begin{array}{c} \beta \\ -\sigma \end{array}\right)
   =
   \left(\begin{array}{c}
   \sum_{{\bf k}, \|{\bf k}\|_2 \le p} \ln |a_{{\bf k}}|
   \\
   \sum_{{\bf k}, \|{\bf k}\|_2\le p} \ln |a_{{\bf k}}| \ln \|{\bf
 k}\|_2 \end{array}\right)
\]

Solving for $\beta$ and $\sigma$ is just a linear regression fit and to do that we will use FESeries::linear_regression().

While we are not particularly interested in the actual value of $\beta$, the formula above gives us a means to calculate the value of the exponent $\sigma$ that we can then use to determine that $u(\hat{\bf x})$ is in $H^s(K)$ with $s=\sigma-\frac d2$. The decay rates $\sigma$ will suffice as our smoothness indicators and will be calculated on each cell for any provided solution.

      Note
      An extensive demonstration of the use of these functions is provided in step-27.
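
A hedged sketch of how this estimator is typically driven (assuming the default_fe_series and coefficient_decay helpers of this namespace, in the spirit of step-27; dim, fe_collection, dof_handler, solution and triangulation are assumed to exist):

using namespace dealii;

// Build a Fourier series object matching the hp finite element collection.
FESeries::Fourier<dim> fourier =
  SmoothnessEstimator::Fourier::default_fe_series(fe_collection);

// One decay rate sigma per cell; skipped cells get a signaling NaN.
Vector<float> smoothness_indicators(triangulation.n_active_cells());
SmoothnessEstimator::Fourier::coefficient_decay(fourier,
                                                dof_handler,
                                                solution,
                                                smoothness_indicators);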

      Function Documentation

In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors $\bf k$ describing Fourier polynomials $P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|\bf k\|_2$, we take the maximum among those. Thus, the least-squares fit is performed on

\[
   \ln \left( \max\limits_{\|{\bf k}\|_2} |a_{\bf k}| \right) \sim
     C - \sigma \ln \|{\bf k}\|_2
\]

      for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element. We exclude the $\|{\bf k}\|_2=0$ modes to avoid the singularity of the logarithm.

      The regression_strategy parameter determines which norm will be used on the subset of coefficients $\bf k$ with the same absolute value $\|{\bf k}\|_2$. Default is VectorTools::Linfty_norm for a maximum approximation.

      For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

      A series expansion object fe_fourier has to be supplied, which needs to be constructed with the same FECollection object as the dof_handler.

      The parameter smallest_abs_coefficient allows one to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

      Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. The parameter only_flagged_cells controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.

      Definition at line 370 of file smoothness_estimator.cc.
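      As a usage illustration, a minimal sketch (not part of this page): fe_collection, triangulation, dof_handler, and solution are assumed to exist already, as in step-27:

      #include <deal.II/fe/fe_series.h>
      #include <deal.II/lac/vector.h>
      #include <deal.II/numerics/smoothness_estimator.h>

      using namespace dealii;

      // Build a Fourier series object in its default configuration for the
      // given hp::FECollection, then estimate the coefficient decay rates.
      FESeries::Fourier<dim> fourier =
        SmoothnessEstimator::Fourier::default_fe_series(fe_collection);

      Vector<float> smoothness_indicators(triangulation.n_active_cells());
      SmoothnessEstimator::Fourier::coefficient_decay(fourier,
                                                      dof_handler,
                                                      solution,
                                                      smoothness_indicators);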

      ◆ coefficient_decay_per_direction()

      In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

      The coefficients_predicate parameter selects Fourier coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Fourier coefficients in each coordinate direction, i.e., set all the elements of the vector to true.

      For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

      A series expansion object fe_fourier has to be supplied, which needs to be constructed with the same FECollection object as the dof_handler.

      The parameter smallest_abs_coefficient allows one to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

      Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. The parameter only_flagged_cells controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.

      Definition at line 468 of file smoothness_estimator.cc.

      ◆ default_fe_series()

      Returns a FESeries::Fourier object for Fourier series expansions with the default configuration for smoothness estimation purposes.

      For each finite element of the provided fe_collection, we use as many modes as its polynomial degree plus two. Further, for each element we use a 5-point Gaussian quadrature iterated in each dimension by the maximal wave number, which is the number of modes decreased by one since we start with $k = 0$.

      As the Fourier expansion can only be performed on scalar fields, this class does not operate on vector-valued finite elements and will therefore throw an assertion. However, each component of a finite element field can be treated as a scalar field, respectively, on which Fourier expansions are again possible. For this purpose, the optional parameter component defines which component of each FiniteElement will be used. The default value of component only applies to scalar FEs, in which case it indicates that the sole component is to be decomposed. For vector-valued FEs, a non-default value must be explicitly provided.

      Definition at line 577 of file smoothness_estimator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 2024-01-30 03:04:50.276863287 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 2024-01-30 03:04:50.276863287 +0000 @@ -109,25 +109,25 @@

      Detailed Description

      Smoothness estimation strategy based on the decay of Legendre expansion coefficients.

      In one dimension, the finite element solution on cell $K$ with polynomial degree $p$ can be written as

\begin{eqnarray*}
    u_h(x) &=& \sum_j u_j \varphi_j (x) \\
    u_{h, k}(x) &=& \sum_{k=0}^{p} a_k \widetilde P_k (x),
    \quad a_k = \sum_j {\cal L}_{k,j} u_j
\end{eqnarray*}

      where $u_j$ are degrees of freedom and $\varphi_j$ are the corresponding shape functions. $\{\widetilde P_k(x)\}$ are Legendre polynomials on cell $K$. $a_k$ and ${\cal L}_{k,j}$ are coefficients and transformation matrices from the Legendre expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if the mapping from the reference cell to the actual cell is affine. We use the class FESeries::Legendre to determine all coefficients $a_k$.

      A function is analytic, i.e., representable by a power series, if and only if its Legendre expansion coefficients decay as (see [eibner2007hp])

\[
   |a_k| \sim c \, \exp(-\sigma k)
\]

      We determine the decay rate $\sigma$ by performing a linear regression fit of

\[
   \ln |a_k| \sim C - \sigma k
\]

      for $k=0,\ldots,p$, with $p$ the polynomial degree of the finite element. The rate of the decay $\sigma$ can be used to estimate the smoothness. For example, one strategy to implement hp-refinement criteria is to perform p-refinement if $\sigma>1$ (see [mavriplis1994hp]).
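      A hedged sketch of that strategy (fe_collection, triangulation, dof_handler, and solution are assumed to exist; bumping the degree via set_future_fe_index() is one possible realization of "p-refinement"):

      #include <deal.II/fe/fe_series.h>
      #include <deal.II/lac/vector.h>
      #include <deal.II/numerics/smoothness_estimator.h>

      #include <algorithm>

      using namespace dealii;

      // Estimate the Legendre coefficient decay rate sigma on every cell ...
      FESeries::Legendre<dim> legendre =
        SmoothnessEstimator::Legendre::default_fe_series(fe_collection);
      Vector<float> smoothness_indicators(triangulation.n_active_cells());
      SmoothnessEstimator::Legendre::coefficient_decay(legendre,
                                                       dof_handler,
                                                       solution,
                                                       smoothness_indicators);

      // ... and prefer p-refinement over h-refinement where sigma > 1.
      for (const auto &cell : dof_handler.active_cell_iterators())
        if (cell->refine_flag_set() &&
            smoothness_indicators[cell->active_cell_index()] > 1.)
          {
            cell->clear_refine_flag();
            cell->set_future_fe_index(
              std::min<unsigned int>(cell->active_fe_index() + 1,
                                     fe_collection.size() - 1));
          }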

      Function Documentation

      ◆ coefficient_decay()


      In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors $\bf k$ describing Legendre polynomials $\widetilde P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|{\bf k}\|_1$, we take the maximum among those. Thus, the least-squares fit is performed on

\begin{eqnarray*}
   \widetilde P_{\bf k}({\bf x}) &=&
     \widetilde P_{k_1} (x_1) \ldots \widetilde P_{k_d} (x_d) \\
   \ln \left( \max\limits_{\|{\bf k}\|_1} |a_{\bf k}| \right) &\sim&
     C - \sigma \|{\bf k}\|_1
\end{eqnarray*}

      for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element.

      For a finite element approximation solution, this function writes the decay rate for every cell into the output vector smoothness_indicators.

      Parameters
      [in]  fe_legendre              FESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction for every finite element in the collection, where $p$ is its polynomial degree.
      [in]  dof_handler              A DoFHandler.
      [in]  solution                 A solution vector.
      [out] smoothness_indicators    A vector for smoothness indicators.
      [in]  regression_strategy      Determines which norm will be used on the subset of coefficients $\mathbf{k}$ with the same absolute value $\|{\bf k}\|_1$. Default is VectorTools::Linfty_norm for a maximum approximation.
      [in]  smallest_abs_coefficient The smallest absolute value of the coefficient to be used in linear regression. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e., for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameter allows one to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients, the returned value for this cell will be $\sigma=\infty$.
      [in]  only_flagged_cells       Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. This parameter controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.
      ◆ coefficient_decay_per_direction()

      In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

      For a finite element approximation solution, this function writes the decay rate for every cell into the output vector smoothness_indicators.

      Parameters
      [in]  fe_legendre              FESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction, where $p$ is the maximum polynomial degree to be used.
      [in]  dof_handler              A DoFHandler.
      [in]  solution                 A solution vector.
      [out] smoothness_indicators    A vector for smoothness indicators.
      [in]  coefficients_predicate   A predicate to select Legendre coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Legendre coefficients in each coordinate direction, i.e., set all elements of the vector to true.
      [in]  smallest_abs_coefficient The smallest absolute value of the coefficient to be used in linear regression in each coordinate direction. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e., for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameter allows one to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.
      [in]  only_flagged_cells       Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. This parameter controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 2024-01-30 03:04:50.296863453 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 2024-01-30 03:04:50.296863453 +0000 @@ -139,18 +139,18 @@

      Given a sparse matrix (system_matrix, sparsity_pattern), construct a new sparse matrix (system_matrix_out, sparsity_pattern_out) by restriction

\[
  A_i = R_i A R_i^T,
\]

      where the Boolean matrix $R_i$ is defined by the entries of requested_is.

      The function can be called by multiple processes with different sets of indices, allowing one to assign each process a different $A_i$.

      Such a function is useful to implement Schwarz methods, where operations of type

\[
  u^{n} = u^{n-1} + \sum_{i} R_i^T A_i^{-1} R_i (f - A u^{n-1})
\]

      are performed to iteratively solve a system of type $Au=f$.

      Warning
      This is a collective call that needs to be executed by all processes in the communicator of sparse_matrix_in.
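      As an illustration only: assuming the function described here is the restrict_to_serial_sparse_matrix() overload of this namespace (the name is not visible in this diff and is our assumption), a call could look like the following, with system_matrix, sparsity_pattern, and the IndexSet requested_is assumed to exist:

      #include <deal.II/base/index_set.h>
      #include <deal.II/lac/sparse_matrix.h>
      #include <deal.II/lac/sparse_matrix_tools.h>
      #include <deal.II/lac/sparsity_pattern.h>

      using namespace dealii;

      // Each process requests the rows/columns it needs; the result is the
      // local sub-matrix A_i = R_i A R_i^T together with its pattern.
      SparsityPattern      sparsity_pattern_out;
      SparseMatrix<double> system_matrix_out;
      SparseMatrixTools::restrict_to_serial_sparse_matrix(system_matrix,
                                                          sparsity_pattern,
                                                          requested_is,
                                                          system_matrix_out,
                                                          sparsity_pattern_out);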
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 2024-01-30 03:04:50.316863620 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 2024-01-30 03:04:50.316863620 +0000 @@ -177,7 +177,7 @@
      Note
      This function returns an internal class object consisting of an array subscript operator operator[](unsigned int) and an alias value_type describing its return value.
      Template Parameters
      index   The index to be shifted to the end. Indices are counted from 0, thus the valid range is $0\le\text{index}<\text{rank}$.
      rank    Rank of the tensorial object t
      T       A tensorial object of rank rank. T must provide a local alias value_type and an index operator operator[]() that returns a (const or non-const) reference of value_type.

      This function contracts two tensorial objects left and right and stores the result in result. The contraction is done over the last no_contr indices of both tensorial objects:

\[
   \text{result}_{i_1,..,i_{r1},j_1,..,j_{r2}}
   = \sum_{k_1,..,k_{\mathrm{no\_contr}}}
     \mathrm{left}_{i_1,..,i_{r1},k_1,..,k_{\mathrm{no\_contr}}}
     \mathrm{right}_{j_1,..,j_{r2},k_1,..,k_{\mathrm{no\_contr}}}
\]

      Calling this function is equivalent to writing the following low level code:

      for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
      ...
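      For a concrete case, a small sketch of a single contraction (no_contr = 1) of two rank-2 tensors in three dimensions, so that result equals left times the transpose of right:

      #include <deal.II/base/tensor.h>
      #include <deal.II/base/tensor_accessors.h>

      using namespace dealii;

      // result[i][j] = sum_k left[i][k] * right[j][k]
      // Template arguments: no_contr = 1, rank_1 = 2, rank_2 = 2, dim = 3.
      Tensor<2, 3> left, right;
      left[0][1]  = 1.; // a couple of example entries
      right[1][1] = 2.;

      Tensor<2, 3> result;
      TensorAccessors::contract<1, 2, 2, 3>(result, left, right);
      // Now result[0][1] == left[0][1] * right[1][1] == 2.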

      Full contraction of three tensorial objects:

\[
   \sum_{i_1,..,i_{r1},j_1,..,j_{r2}}
   \text{left}_{i_1,..,i_{r1}}
   \text{middle}_{i_1,..,i_{r1},j_1,..,j_{r2}}
   \text{right}_{j_1,..,j_{r2}}
\]

      Calling this function is equivalent to writing the following low level code:

      T1 result = T1();
      for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 2024-01-30 03:04:50.332863753 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 2024-01-30 03:04:50.332863753 +0000 @@ -134,8 +134,8 @@

      Return the elements of a continuous Givens rotation matrix and the norm of the input vector.

      That is for a given pair x and y, return $c$, $s$ and $\sqrt{x^2+y^2}$ such that

\[
 \begin{bmatrix}
 c  & s \\
 -s & c
 \end{bmatrix}
 \begin{bmatrix}
 x \\
 y
 \end{bmatrix}
 =
 \begin{bmatrix}
 \sqrt{x^2+y^2} \\
 0
 \end{bmatrix}
\]

      Note
      The function is implemented for real valued numbers only.
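      A short usage sketch (assuming the returned std::array is ordered as $\{c, s, r\}$ with $r=\sqrt{x^2+y^2}$):

      #include <deal.II/lac/utilities.h>

      #include <array>
      #include <iostream>

      int main()
      {
        const double x = 3., y = 4.;

        // csr = {c, s, r}; for (3, 4) the norm r is 5.
        const std::array<double, 3> csr =
          dealii::Utilities::LinearAlgebra::givens_rotation(x, y);

        // Applying the rotation annihilates the second component:
        //   [ c s; -s c ] [x; y] = [r; 0]
        std::cout << csr[0] * x + csr[1] * y << ' '    // prints r = 5
                  << -csr[1] * x + csr[0] * y << '\n'; // prints 0
      }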

      Return the elements of a hyperbolic rotation matrix.

      That is for a given pair x and y, return $c$, $s$ and $r$ such that

\[
 \begin{bmatrix}
 c  & -s \\
 -s & c
 \end{bmatrix}
 \begin{bmatrix}
 x \\
 y
 \end{bmatrix}
 =
 \begin{bmatrix}
 r \\
 0
 \end{bmatrix}
\]

      A real-valued solution only exists if $|x|>|g|$; the function will throw an error otherwise.

      Note
      The function is implemented for real valued numbers only.
      ◆ lanczos_largest_eigenvalue()

      Estimate an upper bound for the largest eigenvalue of H by a k-step Lanczos process starting from the initial vector v0. Typical values of k are below 10. This estimator computes a k-step Lanczos decomposition $H V_k=V_k T_k+f_k e_k^T$ where $V_k$ contains the k Lanczos basis vectors, $V_k^TV_k=I_k$, $T_k$ is the tridiagonal Lanczos matrix, $f_k$ is a residual vector with $f_k^TV_k=0$, and $e_k$ is the k-th canonical basis vector of $R^k$. The returned value is $ ||T_k||_2 + ||f_k||_2$. If eigenvalues is not nullptr, the eigenvalues of $T_k$ will be written there.

      vector_memory is used to allocate memory for temporary vectors. OperatorType has to provide vmult operation with VectorType.

      This function implements the algorithm from

      @article{Zhou2006,
      Title = {Self-consistent-field Calculations Using Chebyshev-filtered
      Volume = {219},
      Pages = {172--184},
      }
      Note
      This function uses Lapack routines to compute the largest eigenvalue of $T_k$.
      This function provides an alternate estimate to that obtained from several steps of SolverCG with SolverCG<VectorType>::connect_eigenvalues_slot().
      ◆ chebyshev_filter()

      Apply Chebyshev polynomial of the operator H to x. For a non-defective operator $H$ with a complete set of eigenpairs $H \psi_i = \lambda_i \psi_i$, the action of a polynomial filter $p$ is given by $p(H)x =\sum_i a_i p(\lambda_i) \psi_i$, where $x=: \sum_i a_i \psi_i$. Thus by appropriately choosing the polynomial filter, one can alter the eigenmodes contained in $x$.

      This function uses Chebyshev polynomials of the first kind. Below is an example of the polynomial $T_n(x)$ of degree $n=8$ normalized to unity at $-1.2$.

      By introducing a linear mapping $L$ from unwanted_spectrum to $[-1,1]$, we can damp the corresponding modes in x. The higher the polynomial degree $n$, the more rapidly it grows outside of $[-1,1]$. In order to avoid numerical overflow, we normalize the polynomial filter to unity at tau. Thus, the filtered operator is $p(H) = T_n(L(H))/T_n(L(\tau))$.

      The action of the Chebyshev filter only requires evaluation of vmult() of H and is based on the recursion equation for Chebyshev polynomials of degree $n$: $T_{n}(x) = 2x T_{n-1}(x) - T_{n-2}(x)$ with $T_0(x)=1$ and $T_1(x)=x$.
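      To make the recurrence concrete, here is a self-contained scalar sketch; the filter itself applies the same recurrence with vmult() taking the role of the multiplication by $x$:

      #include <iostream>

      // Evaluate the Chebyshev polynomial of the first kind T_n(x) via
      // T_n(x) = 2 x T_{n-1}(x) - T_{n-2}(x), T_0(x) = 1, T_1(x) = x.
      double chebyshev_T(const unsigned int n, const double x)
      {
        if (n == 0)
          return 1.;
        double t_prev = 1.; // T_0
        double t      = x;  // T_1
        for (unsigned int k = 2; k <= n; ++k)
          {
            const double t_next = 2. * x * t - t_prev;
            t_prev = t;
            t      = t_next;
          }
        return t;
      }

      int main()
      {
        // Outside [-1,1] the polynomial grows rapidly with the degree n:
        std::cout << chebyshev_T(8, 0.5) << ' ' << chebyshev_T(8, 1.2) << '\n';
      }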

      vector_memory is used to allocate memory for temporary objects.

      This function implements the algorithm (with a minor fix of the sign of $\sigma_1$) from

      @article{Zhou2014,
      Title = {Chebyshev-filtered subspace iteration method free of sparse
      diagonalization for solving the Kohn--Sham equation},
      Author = {Zhou, Yunkai and Chelikowsky, James R and Saad, Yousef},
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 2024-01-30 03:04:50.356863953 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 2024-01-30 03:04:50.356863953 +0000 @@ -137,7 +137,7 @@

      Detailed Description

      A namespace for algorithms that implement the task of communicating in a dynamic-sparse way. In computer science, this is often called a consensus problem.

      The problem consensus algorithms are trying to solve is this: Let's say you have $P$ processes that work together via MPI. Each (or at least some) of these want to send information to some of the other processes, or request information from other processes. No process knows which other process wants to communicate with them. The challenge is to determine who needs to talk to whom and what information needs to be sent, and to come up with an algorithm that ensures that this communication happens.

      That this is not a trivial problem can be seen by an analogy of the postal service. There, some senders may request information from some other participants in the postal service. So they send a letter that requests the information, but the recipients do not know how many such letters they need to expect (or that they should expect any at all). They also do not know how long they need to keep checking their mailbox for incoming requests. The recipients can be considered reliable, however: We can assume that everyone who is sent a request puts a letter with the answer in the mail. This time at least the recipients of these answers know that they are waiting for these answers because they have previously sent a request. They do not know in advance, however, when the answer will arrive and how long to wait. The goal of a consensus algorithm is then to come up with a strategy in which every participant can say who they want to send requests to, what that request is, and is then guaranteed an answer. The algorithm will only return when all requests by all participants have been answered and the answer delivered to the requesters.

      The problem is generally posed in terms of requests and answers. In practice, either of these two may be empty messages. For example, processes may simply want to send information to others that they know these others need; in this case, the "answer" message may be empty and its meaning is simply an affirmation that the information was received. Similarly, in some cases processes simply need to inform others that they want information, but the destination process knows what information is being requested (based on where in the program the request happens) and can send that information without there being any identifying information in the request; in that case, the request message may be empty and simply serve to identify the requester. (Each message can be queried for its sender.)

      As mentioned in the first paragraph, the algorithms we are interested in are "dynamic-sparse":

        /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 2024-01-30 03:04:50.472864920 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 2024-01-30 03:04:50.472864920 +0000 @@ -332,7 +332,7 @@

      •  Projection: compute the L2-projection of the given function onto the finite element space, i.e. if f is the function to be projected, compute fh in Vh such that (fh,vh)=(f,vh) for all discrete test functions vh. This is done through the solution of the linear system of equations M v = f where M is the mass matrix $m_{ij} = \int_\Omega \phi_i(x) \phi_j(x) dx$ and $f_i = \int_\Omega f(x) \phi_i(x) dx$. The solution vector $v$ then is the nodal representation of the projection fh. The project() functions are used in the step-21 and step-23 tutorial programs. A minimal usage sketch follows after this list.

        In order to get proper results, it may be necessary to treat boundary conditions right. Below are listed some cases where this may be needed. If needed, this is done by L2-projection of the trace of the given function onto the finite element space restricted to the boundary of the domain, then taking this information and using it to eliminate the boundary nodes from the mass matrix of the whole domain, using the MatrixTools::apply_boundary_values() function. The projection of the trace of the function to the boundary is done with the VectorTools::project_boundary_values() (see below) function, which is called with a map of boundary functions std::map<types::boundary_id, const Function<spacedim,number>*> in which all boundary indicators from zero to numbers::internal_face_boundary_id-1 (numbers::internal_face_boundary_id is used for other purposes, see the Triangulation class documentation) point to the function to be projected. The projection to the boundary takes place using a second quadrature formula on the boundary given to the project() function. The first quadrature formula is used to compute the right hand side and for numerical quadrature of the mass matrix.

        The projection of the boundary values first, then eliminating them from the global system of equations is not needed usually. It may be necessary if you want to enforce special restrictions on the boundary values of the projected function, for example in time dependent problems: you may want to project the initial values but need consistency with the boundary values for later times. Since the latter are projected onto the boundary in each time step, it is necessary that we also project the boundary values of the initial values, before projecting them to the whole domain.

        Obviously, the results of the two schemes for projection are different. Usually, when projecting to the boundary first, the L2-norm of the difference between original function and projection over the whole domain will be larger (factors of five have been observed) while the L2-norm of the error integrated over the boundary should of course be less. The reverse should also hold if no projection to the boundary is performed.


      •  Creation of right hand side vectors: The create_right_hand_side() function computes the vector $f_i = \int_\Omega f(x) \phi_i(x) dx$. This is the same as what the MatrixCreator::create_* functions which take a right hand side do, but without assembling a matrix.

      •  Creation of right hand side vectors for point sources: The create_point_source_vector() function computes the vector $F_i = \int_\Omega \delta(x-x_0) \phi_i(x) dx$.

      •  Creation of boundary right hand side vectors: The create_boundary_right_hand_side() function computes the vector $f_i = \int_{\partial\Omega} g(x) \phi_i(x) dx$. This is the right hand side contribution of boundary forces when having inhomogeneous Neumann boundary values in Laplace's equation or other second order operators. This function also takes an optional argument denoting over which parts of the boundary the integration shall extend. If the default argument is used, it is applied to all boundaries.
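      As referenced in the projection item above, a minimal sketch of an L2-projection; dof_handler, constraints, fe, and exact_function are assumed to exist, and the quadrature degree is an arbitrary example choice:

      #include <deal.II/base/quadrature_lib.h>
      #include <deal.II/lac/vector.h>
      #include <deal.II/numerics/vector_tools.h>

      using namespace dealii;

      // L2-project exact_function onto the FE space: solves M v = f so
      // that (f_h, v_h) = (f, v_h) for all discrete test functions v_h.
      Vector<double> projected(dof_handler.n_dofs());
      VectorTools::project(dof_handler,
                           constraints,
                           QGauss<dim>(fe.degree + 1),
                           exact_function,
                           projected);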

      Denote which norm/integral is to be computed by the integrate_difference() function on each cell and compute_global_error() for the whole domain. Let $f:\Omega \rightarrow \mathbb{R}^c$ be a finite element function with $c$ components where component $c$ is denoted by $f_c$ and $\hat{f}$ be the reference function (the fe_function and exact_solution arguments to integrate_difference()). Let $e_c = \hat{f}_c - f_c$ be the difference or error between the two. Further, let $w:\Omega \rightarrow \mathbb{R}^c$ be the weight function of integrate_difference(), which is assumed to be equal to one if not supplied. Finally, let $p$ be the exponent argument (for $L_p$-norms). A usage sketch of both functions follows after the enumerator list below.

      In the following, we denote by $E_K$ the local error computed by integrate_difference() on cell $K$, whereas $E$ is the global error computed by compute_global_error(). Note that integrals are approximated by quadrature in the usual way:

\[
 \int_A f(x) dx \approx \sum_q f(x_q) \omega_q.
\]

      Similarly for suprema over a cell $T$:

\[
 \sup_{x\in T} |f(x)| \approx \max_q |f(x_q)|.
\]

      Enumerator
      mean 

      The function or difference of functions is integrated on each cell $K$:

\[
   E_K
 = \int_K \sum_c (\hat{f}_c - f_c) \, w_c
 = \int_K \sum_c e_c \, w_c
\]

      and summed up to get

\[
   E = \sum_K E_K
     = \int_\Omega \sum_c (\hat{f}_c - f_c) \, w_c
\]

      or, for $w \equiv 1$:

\[
   E = \int_\Omega (\hat{f} - f)
     = \int_\Omega e.
\]

      Note: This differs from what is typically known as the mean of a function by a factor of $\frac{1}{|\Omega|}$. To compute the mean you can also use compute_mean_value(). Finally, pay attention to the sign: if $\hat{f}=0$, this will compute the negative of the mean of $f$.

      L1_norm 

      The absolute value of the function is integrated:

\[
   E_K = \int_K \sum_c |e_c| \, w_c
\]

      and

\[
   E = \sum_K E_K = \int_\Omega \sum_c |e_c| w_c,
\]

      or, for $w \equiv 1$:

\[
   E  = \| e \|_{L^1}.
\]

      L2_norm 

      The square of the function is integrated and the square root of the result is computed on each cell:

\[
   E_K = \sqrt{ \int_K \sum_c e_c^2 \, w_c }
\]

      and

\[
   E = \sqrt{\sum_K E_K^2} = \sqrt{ \int_\Omega \sum_c e_c^2 \, w_c }
\]

      or, for $w \equiv 1$:

\[
   E = \sqrt{ \int_\Omega e^2 }
     = \| e \|_{L^2}
\]

      Lp_norm 

      The absolute value to the $p$-th power is integrated and the $p$-th root is computed on each cell. The exponent $p$ is the exponent argument of integrate_difference() and compute_mean_value():

\[
   E_K = \left( \int_K \sum_c |e_c|^p \, w_c \right)^{1/p}
\]

      and

\[
   E = \left( \sum_K E_K^p \right)^{1/p}
\]

      or, for $w \equiv 1$:

\[
   E = \| e \|_{L^p}.
\]

      Linfty_norm 

      The maximum absolute value of the function:

\[
   E_K = \sup_K \max_c |e_c| \, w_c
\]

      and

\[
   E = \max_K E_K
 = \sup_\Omega \max_c |e_c| \, w_c
\]

      or, for $w \equiv 1$:

\[
   E  = \sup_\Omega \|e\|_\infty = \| e \|_{L^\infty}.
\]

      H1_seminorm 

      L2_norm of the gradient:

\[
   E_K = \sqrt{ \int_K \sum_c (\nabla e_c)^2 \, w_c }
\]

      and

\[
   E = \sqrt{\sum_K E_K^2} = \sqrt{ \int_\Omega \sum_c (\nabla e_c)^2 \, w_c }
\]

      or, for $w \equiv 1$:

\[
   E = \| \nabla e \|_{L^2}.
\]
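      As referenced before the enumerator list, a minimal sketch of the typical pattern; dof_handler, triangulation, fe, solution, and exact_solution are assumed to exist, and the quadrature degree is an example choice:

      #include <deal.II/base/quadrature_lib.h>
      #include <deal.II/lac/vector.h>
      #include <deal.II/numerics/vector_tools.h>

      using namespace dealii;

      // Compute the local errors E_K in the L2 norm ...
      Vector<double> local_errors(triangulation.n_active_cells());
      VectorTools::integrate_difference(dof_handler,
                                        solution,
                                        exact_solution,
                                        local_errors,
                                        QGauss<dim>(fe.degree + 1),
                                        VectorTools::L2_norm);

      // ... and accumulate them into the global error E.
      const double global_error =
        VectorTools::compute_global_error(triangulation,
                                          local_errors,
                                          VectorTools::L2_norm);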
/usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html	2024-01-30 03:04:50.508865220 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html	2024-01-30 03:04:50.508865220 +0000

      Predict how the current error_indicators will adapt after refinement and coarsening were to happen on the provided dof_handler, and write its results to predicted_errors. Each entry of error_indicators and predicted_errors corresponds to an active cell on the underlying Triangulation, thus each container has to be of size Triangulation::n_active_cells(). The errors are interpreted to be measured in the energy norm; this assumption enters the rate of convergence that is used in the prediction. The $l_2$-norm of the output argument predicted_errors corresponds to the predicted global error after adaptation.

      For h-adaptation, we expect the local error $\eta_K$ on cell $K$ to be proportional to $(h_K)^{p_K}$ in the energy norm, where $h_K$ denotes the cell diameter and $p_K$ the polynomial degree of the currently assigned finite element on cell $K$.

      During h-coarsening, the finite elements on siblings may be different, and their parent cell will be assigned to their least dominating finite element that belongs to its most general child. Thus, we will always interpolate on an enclosing finite element space. Additionally assuming that the finite elements on the cells to be coarsened are sufficient to represent the solution correctly (e.g. at least quadratic basis functions for a quadratic solution), we are confident to say that the error will not change by sole interpolation on the larger finite element space.

      For p-adaptation, the local error is expected to converge exponentially with the polynomial degree of the assigned finite element. Each increase or decrease of the degree will thus change its value by a user-defined control parameter gamma_p. The assumption of exponential convergence is only valid if both h- and p-adaptive methods are combined in a sense that they are both utilized throughout a mesh, but do not have to be applied both on a cell simultaneously.

      The prediction algorithm is formulated as follows with control parameters gamma_p, gamma_h and gamma_n that may be used to influence prediction for each adaptation type individually. The results for each individual cell are stored in the predicted_errors output argument.

      For p-adaptation: $\eta_{K,\text{pred}} = \eta_{K} \, \gamma_\text{p}^{(p_{K,\text{future}} - p_{K})}$

      On basis of the refinement history, we use the predicted error estimates to decide how cells will be adapted in the next adaptation step. Comparing the predicted error from the previous adaptation step to the error estimates of the current step allows us to justify whether our previous choice of adaptation was justified, and lets us decide how to adapt in the next one.

      We thus have to transfer the predicted error from the old to the adapted mesh. When transferring the predicted error to the adapted mesh, make sure to configure your CellDataTransfer object with AdaptationStrategies::Refinement::l2_norm() as a refinement strategy and AdaptationStrategies::Coarsening::l2_norm() as a coarsening strategy. This ensures that the $l_2$-norm of the predicted errors is preserved on both meshes.

      In this context, we assume that the local error on a cell to be h-refined will be divided equally on all of its $n_{K_c}$ children, whereas local errors on siblings will be summed up on the parent cell in case of h-coarsening. This assumption is often not satisfied in practice: For example, if a cell is at a corner singularity, then the one child cell that ends up closest to the singularity will inherit the majority of the remaining error – but this function can not know where the singularity will be, and consequently assumes equal distribution.

      Incorporating the transfer from the old to the adapted mesh, the complete error prediction algorithm reads as follows:
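      The complete listing is not reproduced in this diff; as a hedged illustration of the entry point only, a call might look like this (dof_handler, triangulation, and error_indicators are assumed to exist, and the gamma values shown are example choices):

      #include <deal.II/hp/refinement.h>
      #include <deal.II/lac/vector.h>

      #include <cmath>

      using namespace dealii;

      // Predict how the current error indicators change under the refinement
      // flags and future FE indices that are currently set on the mesh.
      Vector<float> predicted_errors(triangulation.n_active_cells());
      hp::Refinement::predict_error(dof_handler,
                                    error_indicators,
                                    predicted_errors,
                                    /*gamma_p=*/std::sqrt(0.4),
                                    /*gamma_h=*/2.,
                                    /*gamma_n=*/1.);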

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 2024-01-30 03:04:50.612866086 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 2024-01-30 03:04:50.612866086 +0000 @@ -853,8 +853,8 @@

      Creates a (dim + 1)-dimensional point by copying over the coordinates of the incoming dim-dimensional point and setting the "missing" (dim + 1)-dimensional component to the incoming coordinate value.

      For example, given the input $\{(x, y), 2, z \}$ this function creates the point $(x, y, z)$.

      The coordinates of the dim-dimensional point are written to the coordinates of the (dim + 1)-dimensional point in the order of the convention given by the function coordinate_to_one_dim_higher. Thus, the order of coordinates on the lower-dimensional point are not preserved: $\{(z, x), 1, y \}$ creates the point $(x, y, z)$.

      Definition at line 24 of file function_restriction.cc.

      Compute the polynomial interpolation of a tensor product shape function $\varphi_i$ given a vector of coefficients $u_i$ in the form $u_h(\mathbf{x}) = \sum_{i=1}^{k^d} \varphi_i(\mathbf{x}) u_i$. The shape functions $\varphi_i(\mathbf{x}) = \prod_{d=1}^{\text{dim}}\varphi_{i_d}^\text{1d}(x_d)$ represent a tensor product. The function returns a pair with the value of the interpolation as the first component and the gradient in reference coordinates as the second component. Note that for compound types (e.g. the values field being a Point<spacedim> argument), the components of the gradient are sorted as Tensor<1, dim, Tensor<1, spacedim>> with the derivatives as the first index; this is a consequence of the generic arguments in the function.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 2024-01-30 03:04:50.640866319 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 2024-01-30 03:04:50.640866319 +0000 @@ -383,7 +383,7 @@

      This function works a lot like the apply_to_subranges() function, but it allows one to accumulate numerical results computed on each subrange into one number. The type of this number is given by the ResultType template argument that needs to be explicitly specified.

      An example of use of this function is to compute the value of the expression $x^T A x$ for a square matrix $A$ and a vector $x$. The sum over rows can be parallelized and the whole code might look like this:

      void matrix_norm (const FullMatrix &A,
                        const Vector &x)
      {
      return
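      The snippet above is cut off at this point in the page; a self-contained sketch of the same idea follows, where the helper name, the lambda wrapper, and the grainsize of 50 are illustrative assumptions:

      #include <deal.II/base/parallel.h>
      #include <deal.II/lac/full_matrix.h>
      #include <deal.II/lac/vector.h>

      #include <cmath>
      #include <cstddef>

      using namespace dealii;

      // Accumulate the partial sum of x^T A x over the row range [begin, end).
      double mat_norm_sqr_on_subranges(const std::size_t         begin,
                                       const std::size_t         end,
                                       const FullMatrix<double> &A,
                                       const Vector<double>     &x)
      {
        double partial_sum = 0.;
        for (std::size_t i = begin; i < end; ++i)
          for (std::size_t j = 0; j < A.n(); ++j)
            partial_sum += x(i) * A(i, j) * x(j);
        return partial_sum;
      }

      double matrix_norm(const FullMatrix<double> &A, const Vector<double> &x)
      {
        // Parallelize the sum over rows and accumulate the subrange results.
        return std::sqrt(parallel::accumulate_from_subranges<double>(
          [&](const std::size_t begin, const std::size_t end) {
            return mat_norm_sqr_on_subranges(begin, end, A, x);
          },
          std::size_t(0),
          std::size_t(A.m()),
          /*grainsize=*/50));
      }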
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 2024-01-30 03:04:50.664866519 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 2024-01-30 03:04:50.664866519 +0000 @@ -303,7 +303,7 @@

      This program obviously does not have a whole lot of functionality, but in particular the second_grid function has a bunch of places where you can play with it. For example, you could modify the criterion by which we decide which cells to refine. An example would be to change the condition to this:

      for (auto &cell: triangulation.active_cell_iterators())
      if (cell->center()[1] > 0)
      cell->set_refine_flag ();
This would refine all cells for which the $y$-coordinate of the cell's center is greater than zero (the TriaAccessor::center function that we call by dereferencing the cell iterator returns a Point<2> object; subscripting [0] would give the $x$-coordinate, subscripting [1] the $y$-coordinate). By looking at the functions that TriaAccessor provides, you can also use more complicated criteria for refinement.
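As another variation, a hypothetical criterion that refines based on the distance of the cell center from the origin might look like this:

// Refine only the cells whose center lies inside the disk of radius
// 0.5 around the origin; Point::norm() returns the Euclidean norm.
for (auto &cell : triangulation.active_cell_iterators())
  if (cell->center().norm() < 0.5)
    cell->set_refine_flag();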

      In general, what you can do with operations of the form cell->something() is a bit difficult to find in the documentation because cell is not a pointer but an iterator. The functions you can call on a cell can be found in the documentation of the classes TriaAccessor (which has functions that can also be called on faces of cells or, more generally, all sorts of geometric objects that appear in a triangulation), and CellAccessor (which adds a few functions that are specific to cells).

      A more thorough description of the whole iterator concept can be found in the Iterators on mesh-like containers documentation module.

      Different geometries

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 2024-01-30 03:04:50.696866786 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 2024-01-30 03:04:50.696866786 +0000 @@ -110,10 +110,10 @@
    3. The plain program
    4. Introduction

This is a rather short example which only shows some aspects of using higher order mappings. By mapping we mean the transformation between the unit cell (i.e. the unit line, square, or cube) to the cells in real space. In all the previous examples, we have implicitly used linear or d-linear mappings; you will not have noticed this at all, since this is what happens if you do not do anything special. However, if your domain has curved boundaries, there are cases where the piecewise linear approximation of the boundary (i.e. by straight line segments) is not sufficient, and you want your computational domain to be an approximation to the real domain using curved boundaries as well. If the boundary approximation uses piecewise quadratic parabolas to approximate the true boundary, then we say that this is a quadratic or $Q_2$ approximation. If we use piecewise graphs of cubic polynomials, then this is a $Q_3$ approximation, and so on.

For some differential equations, it is known that piecewise linear approximations of the boundary, i.e. $Q_1$ mappings, are not sufficient if the boundary of the exact domain is curved. Examples are the biharmonic equation using $C^1$ elements, or the Euler equations of gas dynamics on domains with curved reflective boundaries. In these cases, it is necessary to compute the integrals using a higher order mapping. If we do not use such a higher order mapping, the order of approximation of the boundary dominates the order of convergence of the entire numerical scheme, irrespective of the order of convergence of the discretization in the interior of the domain.

      Rather than demonstrating the use of higher order mappings with one of these more complicated examples, we do only a brief computation: calculating the value of $\pi=3.141592653589793238462643\ldots$ by two different methods.

The first method uses a triangulated approximation of the circle with unit radius and integrates a unit magnitude constant function ( $f = 1$) over it. Of course, if the domain were the exact unit circle, then the area would be $\pi$, but since we only use an approximation by piecewise polynomial segments, the value of the area we integrate over is not exactly $\pi$. However, it is known that as we refine the triangulation, a $Q_p$ mapping approximates the boundary with an order $h^{p+1}$, where $h$ is the mesh size. We will check the values of the computed area of the circle and their convergence towards $\pi$ under mesh refinement for different mappings. We will also find a convergence behavior that is surprising at first, but has a good explanation.

      The second method works similarly, but this time does not use the area of the triangulated unit circle, but rather its perimeter. $\pi$ is then approximated by half of the perimeter, as we choose the radius equal to one.
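A sketch of how such a perimeter computation might look (the variable names fe_face_values, face_quadrature, mapping, and fe are assumptions, not the tutorial's code):

// Sum the face JxW values over all boundary faces; for the unit
// circle, half of this sum approximates pi.
FEFaceValues<dim> fe_face_values(mapping, fe, face_quadrature,
                                 update_JxW_values);
double perimeter = 0;
for (const auto &cell : triangulation.active_cell_iterators())
  for (unsigned int f = 0; f < cell->n_faces(); ++f)
    if (cell->face(f)->at_boundary())
      {
        fe_face_values.reinit(cell, f);
        for (unsigned int q = 0; q < face_quadrature.size(); ++q)
          perimeter += fe_face_values.JxW(q);
      }
const double pi_approx = perimeter / 2;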

      Note
      This tutorial shows in essence how to choose a particular mapping for integrals, by attaching a particular geometry to the triangulation (as had already been done in step-1, for example) and then passing a mapping argument to the FEValues class that is used for all integrals in deal.II. The geometry we choose is a circle, for which deal.II already has a class (SphericalManifold) that can be used. If you want to define your own geometry, for example because it is complicated and cannot be described by the classes already available in deal.II, you will want to read through step-53.

      The commented program

      @@ -157,7 +157,7 @@
      void hyper_ball(Triangulation< dim > &tria, const Point< dim > &center=Point< dim >(), const double radius=1., const bool attach_spherical_manifold_on_boundary_cells=false)
      const ::parallel::distributed::Triangulation< dim, spacedim > * triangulation
Then alternate between generating output on the current mesh for $Q_1$, $Q_2$, and $Q_3$ mappings, and (at the end of the loop body) refining the mesh once globally.

        for (unsigned int refinement = 0; refinement < 2; ++refinement)
        {
        std::cout << "Refinement level: " << refinement << std::endl;
      @@ -196,7 +196,7 @@
       

Now we proceed with the main part of the code, the approximation of $\pi$. The area of a circle is of course given by $\pi r^2$, so having a circle of radius 1, the area represents just the number that is searched for. The numerical computation of the area is performed by integrating the constant function of value 1 over the whole computational domain, i.e. by computing the areas $\int_K 1 \, dx = \int_{\hat K} 1 \ \textrm{det}\ J(\hat x) \, d\hat x \approx \sum_i \textrm{det} \ J(\hat x_i) w(\hat x_i)$, where the sum extends over all quadrature points on all active cells in the triangulation, with $w(x_i)$ being the weight of quadrature point $x_i$. The integrals on each cell are approximated by numerical quadrature, hence the only additional ingredient we need is to set up a FEValues object that provides the corresponding JxW values of each cell. (Note that JxW is meant to abbreviate Jacobian determinant times weight; since in numerical quadrature the two factors always occur at the same places, we only offer the combined quantity, rather than two separate ones.) We note that here we won't use the FEValues object in its original purpose, i.e. for the computation of values of basis functions of a specific finite element at certain quadrature points. Rather, we use it only to gain the JxW at the quadrature points, irrespective of the (dummy) finite element we will give to the constructor of the FEValues object. The actual finite element given to the FEValues object is not used at all, so we could give any.

        template <int dim>
        void compute_pi_by_area()
        {
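      // A sketch of the body that follows in the tutorial (assumed
      // names mapping, fe, quadrature; not the verbatim code): the
      // area is the sum of JxW over all quadrature points of all
      // active cells.
      FEValues<dim> fe_values(mapping, fe, quadrature, update_JxW_values);
      double area = 0;
      for (const auto &cell : triangulation.active_cell_iterators())
        {
          fe_values.reinit(cell);
          for (unsigned int q = 0; q < quadrature.size(); ++q)
            area += fe_values.JxW(q);
        }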
      @@ -404,11 +404,11 @@
      unset ytics
      plot [-1:1][-1:1] "ball_0_mapping_q_1.dat" lw 4 lt rgb "black"

      or using one of the other filenames. The second line makes sure that the aspect ratio of the generated output is actually 1:1, i.e. a circle is drawn as a circle on your screen, rather than as an ellipse. The third line switches off the key in the graphic, as that will only print information (the filename) which is not that important right now. Similarly, the fourth and fifth disable tick marks. The plot is then generated with a specific line width ("lw", here set to 4) and line type ("lt", here chosen by saying that the line should be drawn using the RGB color "black").

The following table shows the triangulated computational domain for $Q_1$, $Q_2$, and $Q_3$ mappings, for the original coarse grid (left), and a once uniformly refined grid (right).

Five-cell discretization of the disk.
20-cell discretization of the disk (i.e., five cells refined once).
Five-cell discretization of the disk with quadratic edges. The boundary is nearly indistinguishable from the actual circle.
20-cell discretization with quadratic edges.
Five-cell discretization of the disk with cubic edges. The boundary is nearly indistinguishable from the actual circle.
20-cell discretization with cubic edges.

These pictures show the obvious advantage of higher order mappings: they approximate the true boundary quite well also on rather coarse meshes. To demonstrate this a little further, here is part of the upper right quarter circle of the coarse meshes with $Q_2$ and $Q_3$ mappings, where the dashed red line marks the actual circle:

      Close-up of quadratic discretization. The distance between the
          quadratic interpolant and the actual circle is small.
      Close-up of cubic discretization. The distance between the
          cubic interpolant and the actual circle is very small.

      Obviously the quadratic mapping approximates the boundary quite well, while for the cubic mapping the difference between approximated domain and true one is hardly visible already for the coarse grid. You can also see that the mapping only changes something at the outer boundaries of the triangulation. In the interior, all lines are still represented by linear functions, resulting in additional computations only on cells at the boundary. Higher order mappings are therefore usually not noticeably slower than lower order ones, because the additional computations are only performed on a small subset of all cells.

      @@ -501,8 +501,8 @@
      5120 3.1415926535897940 8.8818e-16 2.00
      unsigned int level
      Definition grid_out.cc:4618
      Note
      Once the error reaches a level on the order of $10^{-13}$ to $10^{-15}$, it is essentially dominated by round-off and consequently dominated by what precisely the library is doing in internal computations. Since these things change, the precise values and errors change from release to release at these round-off levels, though the overall order of errors should of course remain the same. See also the comment below in the section on Possibilities for extensions about how to compute these results more accurately.
One of the immediate observations from the output above is that in all cases the values converge quickly to the true value of $\pi=3.141592653589793238462643$. Note that for the $Q_4$ mapping, we are already in the regime of roundoff errors and the convergence rate levels off, which is already quite a lot. However, also note that for the $Q_1$ mapping, even on the finest grid the accuracy is significantly worse than on the coarse grid for a $Q_3$ mapping!

The last column of the output shows the convergence order, in powers of the mesh width $h$. In the introduction, we had stated that the convergence order for a $Q_p$ mapping should be $h^{p+1}$. However, in the example shown, the order is rather $h^{2p}$! This at first surprising fact is explained by the properties of the $Q_p$ mapping. At order p, it uses support points that are based on the p+1 point Gauss-Lobatto quadrature rule that selects the support points in such a way that the quadrature rule converges at order 2p. Even though these points are here only used for interpolation of a pth order polynomial, we get a superconvergence effect when numerically evaluating the integral, resulting in the observed high order of convergence. (This effect is also discussed in detail in the following publication: A. Bonito, A. Demlow, and J. Owen: "A priori error estimates for finite element approximations to eigenvalues and eigenfunctions of the Laplace-Beltrami operator", submitted, 2018.)

      Possibilities for extensions

/usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 2024-01-30 03:04:50.728867052 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 2024-01-30 03:04:50.728867052 +0000 @@ -121,7 +121,7 @@

\[
   \int_\Omega f\; dx + \int_{\partial\Omega} g\; ds = 0.
\]

We will consider the special case that $\Omega$ is the circle of radius 1 around the origin, and $f=-2$, $g=1$. This choice satisfies the compatibility condition.

      The compatibility condition allows a solution of the above equation, but it nevertheless retains an ambiguity: since only derivatives of the solution appear in the equations, the solution is only determined up to a constant. For this reason, we have to pose another condition for the numerical solution, which fixes this constant.

      For this, there are various possibilities:

@@ -325,7 +325,7 @@

        That's quite simple, right?

Two remarks are in order, though: First, these functions are used in a lot of contexts. Maybe you want to create a Laplace or mass matrix for a vector-valued finite element; or you want to use the default Q1 mapping; or you want to assemble the matrix with a coefficient in the Laplace operator. For this reason, there are quite a large number of variants of these functions in the MatrixCreator and MatrixTools namespaces. Whenever you need a slightly different version of these functions than the ones called above, it is certainly worthwhile to take a look at the documentation and to check whether something fits your needs.
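For instance, a minimal sketch of one such call could look as follows (the variable names are assumptions):

// Assemble the Laplace matrix using the default mapping and a
// second-order Gauss rule; a coefficient could be supplied through
// an additional Function argument of the same call.
MatrixCreator::create_laplace_matrix(dof_handler,
                                     QGauss<dim>(2),
                                     system_matrix);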

The second remark concerns the quadrature formula we use: we want to integrate over bilinear shape functions, so we know that we have to use at least an order two Gauss quadrature formula. On the other hand, we want the quadrature rule to have at least the order of the boundary approximation. Since the order of a Gauss rule with $r$ points is $2r-1$, and the order of the boundary approximation using polynomials of degree $p$ is $p+1$, we know that $2r \geq p$. Since $r$ has to be an integer and (as mentioned above) has to be at least $2$, this makes up for the formula above computing gauss_degree.

        Since the generation of the body force contributions to the right hand side vector was so simple, we do that all over again for the boundary forces as well: allocate a vector of the right size and call the right function. The boundary function has constant values, so we can generate an object from the library on the fly, and we use the same quadrature formula as above, but this time of lower dimension since we integrate over faces now instead of cells:

          Vector<double> tmp(system_rhs.size());
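The call that fills this vector might then look roughly as follows (a sketch; the quadrature degree and the use of Functions::ConstantFunction for $g=1$ are assumptions based on the description above):

// Integrate the constant boundary value g = 1 against the shape
// functions over all boundary faces, then add the result to the
// global right hand side.
VectorTools::create_boundary_right_hand_side(
  dof_handler,
  QGauss<dim - 1>(gauss_degree),
  Functions::ConstantFunction<dim>(1),
  tmp);
system_rhs += tmp;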
/usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 2024-01-30 03:04:50.776867452 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 2024-01-30 03:04:50.776867452 +0000 @@ -145,21 +145,21 @@

\[
   u=g\quad\mbox{on }\Gamma_-,
\]

on the inflow part $\Gamma_-$ of the boundary $\Gamma=\partial\Omega$ of the domain. Here, ${\mathbf \beta}={\mathbf \beta}({\bf x})$ denotes a vector field, $u$ the (scalar) solution function, $g$ a boundary value function,

        \[
 \Gamma_- \dealcoloneq \{{\bf x}\in\Gamma, {\mathbf \beta}({\bf x})\cdot{\bf n}({\bf x})<0\}
 \]

        the inflow part of the boundary of the domain and ${\bf n}$ denotes the unit outward normal to the boundary $\Gamma$. This equation is the conservative version of the advection equation already considered in step-9 of this tutorial.

On each cell $T$, we multiply by a test function $v_h$ from the left and integrate by parts to get:

        \[
   \left( v_h, \nabla \cdot (\beta u_h) \right)_T
 = -(\nabla v_h, \beta u_h) + \int_{\partial T} v_h u_h \beta \cdot n
 \]

When summing this expression over all cells $T$, the boundary integral is done over all internal and external faces and as such there are three cases:

1. outer boundary on the inflow (we replace $u_h$ by given $g$): $\int_{\Gamma_-} v_h g \beta \cdot n$
2. outer boundary on the outflow: $\int_{\Gamma_+} v_h u_h \beta \cdot n$

@@ -824,7 +824,7 @@

            There are a number of strategies to stabilize the cG method, if one wants to use continuous elements for some reason. Discussing these methods is beyond the scope of this tutorial program; an interested reader could, for example, take a look at step-31.

            Possibilities for extensions

            Given that the exact solution is known in this case, one interesting avenue for further extensions would be to confirm the order of convergence for this program. In the current case, the solution is non-smooth, and so we can not expect to get a particularly high order of convergence, even if we used higher order elements. But even if the solution is smooth, the equation is not elliptic and so it is not immediately clear that we should obtain a convergence order that equals that of the optimal interpolation estimates (i.e. for example that we would get $h^3$ convergence in the $L^2$ norm by using quadratic elements).

In fact, for hyperbolic equations, theoretical predictions often indicate that the best one can hope for is an order one half below the interpolation estimate. For example, for the streamline diffusion method (an alternative method to the DG method used here to stabilize the solution of the transport equation), one can prove that for elements of degree $p$, the order of convergence is $p+\frac 12$ on arbitrary meshes. While the observed order is frequently $p+1$ on uniformly refined meshes, one can construct so-called Peterson meshes on which the worse theoretical bound is actually attained. This should be relatively simple to verify, for example using the VectorTools::integrate_difference function.
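Such a verification could be sketched as follows (the names solution and dof_handler, and the ExactSolution class for the known exact solution, are assumptions):

// Per-cell L2 errors against the exact solution, combined into a
// single global error value.
Vector<float> cell_errors(triangulation.n_active_cells());
VectorTools::integrate_difference(dof_handler,
                                  solution,
                                  ExactSolution<dim>(),
                                  cell_errors,
                                  QGauss<dim>(fe.degree + 1),
                                  VectorTools::L2_norm);
const double L2_error =
  VectorTools::compute_global_error(triangulation,
                                    cell_errors,
                                    VectorTools::L2_norm);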

            A different direction is to observe that the solution of transport problems often has discontinuities and that therefore a mesh in which we bisect every cell in every coordinate direction may not be optimal. Rather, a better strategy would be to only cut cells in the direction parallel to the discontinuity. This is called anisotropic mesh refinement and is the subject of step-30.

            The plain program

            /* ---------------------------------------------------------------------
            /usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 2024-01-30 03:04:50.904868519 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 2024-01-30 03:04:50.904868519 +0000 @@ -163,30 +163,30 @@

The Heidelberg group of Professor Rolf Rannacher, to which the three initial authors of the deal.II library belonged during their PhD time and partly also afterwards, has been involved with adaptivity and error estimation for finite element discretizations since the mid-1990s. The main achievement is the development of error estimates for arbitrary functionals of the solution, and of optimal mesh refinement for its computation.

            We will not discuss the derivation of these concepts in too great detail, but will implement the main ideas in the present example program. For a thorough introduction into the general idea, we refer to the seminal work of Becker and Rannacher [BR95], [BR96r], and the overview article of the same authors in Acta Numerica [BR01]; the first introduces the concept of error estimation and adaptivity for general functional output for the Laplace equation, while the second gives many examples of applications of these concepts to a large number of other, more complicated equations. For applications to individual types of equations, see also the publications by Becker [Bec95], [Bec98], Kanschat [Kan96], [FK97], Suttmeier [Sut96], [RS97], [RS98c], [RS99], Bangerth [BR99b], [Ban00w], [BR01a], [Ban02], and Hartmann [Har02], [HH01], [HH01b]. All of these works, from the original introduction by Becker and Rannacher to individual contributions to particular equations, have later been summarized in a book by Bangerth and Rannacher that covers all of these topics, see [BR03].

            The basic idea is the following: in applications, one is not usually interested in the solution per se, but rather in certain aspects of it. For example, in simulations of flow problems, one may want to know the lift or drag of a body immersed in the fluid; it is this quantity that we want to know to best accuracy, and whether the rest of the solution of the describing equations is well resolved is not of primary interest. Likewise, in elasticity one might want to know about values of the stress at certain points to guess whether maximal load values of joints are safe, for example. Or, in radiative transfer problems, mean flux intensities are of interest.

In all the cases just listed, it is the evaluation of a functional $J(u)$ of the solution which we are interested in, rather than the values of $u$ everywhere. Since the exact solution $u$ is not available, but only its numerical approximation $u_h$, it is sensible to ask whether the computed value $J(u_h)$ is within certain limits of the exact value $J(u)$, i.e. we want to bound the error with respect to this functional, $J(u)-J(u_h)$.

For simplicity of exposition, we henceforth assume that both the quantity of interest $J$ as well as the equation are linear, and we will in particular show the derivation for the Laplace equation with homogeneous Dirichlet boundary conditions, although the concept is much more general. For this general case, we refer to the references listed above. The goal is to obtain bounds on the error, $J(e)=J(u)-J(u_h)$. For this, let us denote by $z$ the solution of a dual problem, defined as follows:

\[
   a(\varphi,z) = J(\varphi) \qquad \forall \varphi,
\]

where $a(\cdot,\cdot)$ is the bilinear form associated with the differential equation, and the test functions are chosen from the corresponding solution space. Then, taking as special test function $\varphi=e$ the error, we have that

\[
   J(e) = a(e,z)
\]

            and we can, by Galerkin orthogonality, rewrite this as

\[
   J(e) = a(e,z-\varphi_h)
\]

where $\varphi_h$ can be chosen from the discrete test space in whatever way we find convenient.

            Concretely, for Laplace's equation, the error identity reads

\[
   J(e) = (\nabla e, \nabla(z-\varphi_h)).
\]

            Because we want to use this formula not only to compute error, but also to refine the mesh, we need to rewrite the expression above as a sum over cells where each cell's contribution can then be used as an error indicator for this cell. Thus, we split the scalar products into terms for each cell, and integrate by parts on each of them:

\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (\nabla (u-u_h), \nabla (z-\varphi_h))_K
   \\
   &=&
   \sum_K (-\Delta (u-u_h), z-\varphi_h)_K
   + (\partial_n (u-u_h), z-z_h)_{\partial K}.
\end{eqnarray*}

Next we use that $-\Delta u=f$, and that the solution of the Laplace equation is smooth enough that $\partial_n u$ is continuous almost everywhere – so the terms involving $\partial_n u$ on one cell cancel with those on its neighbor, where the normal vector has the opposite sign. (The same is not true for $\partial_n u_h$, though.) At the boundary of the domain, where there is no neighbor cell with which this term could cancel, the weight $z-\varphi_h$ can be chosen as zero, and the whole term disappears.

Thus, we have

\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (f+\Delta u_h, z-\varphi_h)_K
   - (\partial_n u_h, z-\varphi_h)_{\partial K\backslash \partial\Omega}.
\end{eqnarray*}

In a final step, note that when taking the normal derivative of $u_h$, we mean the value of this quantity as taken from this side of the cell (for the usual Lagrange elements, derivatives are not continuous across edges). We then rewrite the above formula by exchanging half of the edge integral of cell $K$ with the neighbor cell $K'$, to obtain

\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (f+\Delta u_h, z-\varphi_h)_K
   - \frac 12 (\partial_n u_h|_K + \partial_{n'} u_h|_{K'},
               z-\varphi_h)_{\partial K\backslash \partial\Omega}.
\end{eqnarray*}

Using that for the normal vectors on adjacent cells we have $n'=-n$, we define the jump of the normal derivative by

\[
   [\partial_n u_h] \dealcoloneq \partial_n u_h|_K + \partial_{n'} u_h|_{K'}
   =
   \partial_n u_h|_K - \partial_n u_h|_{K'},
\]

and get the final form after setting the discrete function $\varphi_h$, which is by now still arbitrary, to the point interpolation of the dual solution, $\varphi_h=I_h z$:

\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (f+\Delta u_h, z-I_h z)_K
   - \frac 12 ([\partial_n u_h],
               z-I_h z)_{\partial K\backslash \partial\Omega}.
\end{eqnarray*}

With this, we have obtained an exact representation of the error of the finite element discretization with respect to arbitrary (linear) functionals $J(\cdot)$. Its structure is a weighted form of a residual estimator, as both $f+\Delta u_h$ and $[\partial_n u_h]$ are cell and edge residuals that vanish on the exact solution, and $z-I_h z$ are weights indicating how important the residual on a certain cell is for the evaluation of the given functional. Furthermore, it is a cell-wise quantity, so we can use it as a mesh refinement criterion. The question is: how to evaluate it? After all, the evaluation requires knowledge of the dual solution $z$, which carries the information about the quantity we want to know to best accuracy.

In some, very special cases, this dual solution is known. For example, if the functional $J(\cdot)$ is the point evaluation, $J(\varphi)=\varphi(x_0)$, then the dual solution has to satisfy

\[
   -\Delta z = \delta(x-x_0),
\]

            with the Dirac delta function on the right hand side, and the dual solution is the Green's function with respect to the point $x_0$. For simple geometries, this function is analytically known, and we could insert it into the error representation formula.

However, we do not want to restrict ourselves to such special cases. Rather, we will compute the dual solution numerically, and approximate $z$ by some numerically obtained $\tilde z$. We note that it is not sufficient to compute this approximation $\tilde z$ using the same method as used for the primal solution $u_h$, since then $\tilde z-I_h \tilde z=0$, and the overall error estimate would be zero. Rather, the approximation $\tilde z$ has to be from a larger space than the primal finite element space. There are various ways to obtain such an approximation (see the cited literature), and we will choose to compute it with a higher order finite element space. While this is certainly not the most efficient way, it is simple since we already have all we need to do that in place, and it also allows for simple experimenting. For more efficient methods, again refer to the given literature, in particular [BR95], [BR03].

            With this, we end the discussion of the mathematical side of this program and turn to the actual implementation.

Note
There are two steps above that do not seem necessary if all you care about is computing the error: namely, (i) the subtraction of $\phi_h$ from $z$, and (ii) splitting the integral into a sum of cells and integrating by parts on each. Indeed, neither of these two steps change $J(e)$ at all, as we only ever consider identities above until the substitution of $z$ by $\tilde z$. In other words, if you care only about estimating the global error $J(e)$, then these steps are not necessary. On the other hand, if you want to use the error estimate also as a refinement criterion for each cell of the mesh, then it is necessary to (i) break the estimate into a sum of cells, and (ii) massage the formulas in such a way that each cell's contributions have something to do with the local error. (While the contortions above do not change the value of the sum $J(e)$, they change the values we compute for each cell $K$.) To this end, we want to write everything in the form "residual times dual weight" where a "residual" is something that goes to zero as the approximation $u_h$ becomes better and better. For example, the quantity $\partial_n u_h$ is not a residual, since it simply converges to the (normal component of) the gradient of the exact solution. On the other hand, $[\partial_n u_h]$ is a residual because it converges to $[\partial_n u]=0$. All of the steps we have taken above in developing the final form of $J(e)$ have indeed had the goal of bringing the final formula into a form where each term converges to zero as the discrete solution $u_h$ converges to $u$. This then allows considering each cell's contribution as an "error indicator" that also converges to zero – as it should as the mesh is refined.
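Once each cell's contribution has been collected into a vector of indicators, it can drive mesh adaptation; a hedged sketch using the library's standard strategy, with assumed variable names:

// Flag roughly the 30% of cells with the largest indicators for
// refinement and the 3% with the smallest for coarsening, then
// execute the refinement.
GridRefinement::refine_and_coarsen_fixed_number(triangulation,
                                                error_indicators,
                                                0.3,
                                                0.03);
triangulation.execute_coarsening_and_refinement();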

            The software

            The step-14 example program builds heavily on the techniques already used in the step-13 program. Its implementation of the dual weighted residual error estimator explained above is done by deriving a second class, properly called DualSolver, from the Solver base class, and having a class (WeightedResidual) that joins the two again and controls the solution of the primal and dual problem, and then uses both to compute the error indicator for mesh refinement.

The program continues the modular concept of the previous example, by implementing the dual functional, describing the quantity of interest, by an abstract base class, and providing two different functionals which implement this interface. Adding a different quantity of interest is thus simple.

            @@ -2576,15 +2576,15 @@

Note the subtle interplay between resolving the corner singularities, and resolving around the point of evaluation. It will be rather difficult to generate such a mesh by hand, as this would involve judging quantitatively how much each of the four corner singularities should be resolved, and setting their weight compared to the vicinity of the evaluation point.

The program prints the point value and the estimated error in this quantity. From extrapolating it, we can guess that the exact value is somewhere close to 0.0334473, plus or minus 0.0000001 (note that we get almost 6 valid digits from only 22,000 (primal) degrees of freedom). This number cannot be obtained from the value of the functional alone, but I have used the assumption that the error estimator is mostly exact, and extrapolated the computed value plus the estimated error, to get an approximation of the true value. Computing with more degrees of freedom shows that this assumption is indeed valid.

From the computed results, we can generate two graphs: one that shows the convergence of the error $J(u)-J(u_h)$ (taking the extrapolated value as correct) in the point value, and the value that we get by adding up computed value $J(u_h)$ and estimated error $\eta$ (if the error estimator $\eta$ were exact, then the value $J(u_h)+\eta$ would equal the exact point value, and the error in this quantity would always be zero; however, since the error estimator is only a - good - approximation to the true error, we can by this only reduce the size of the error). In this graph, we also indicate the complexity ${\cal O}(1/N)$ to show that mesh refinement acts optimally in this case. The second chart compares true and estimated error, and shows that the two are actually very close to each other, even for such a complicated quantity as the point value:

      Comparing refinement criteria

Since we have accepted quite some effort when using the mesh refinement driven by the dual weighted error estimator (for solving the dual problem, and for evaluating the error representation), it is worthwhile asking whether that effort was successful. To this end, we first compare the achieved error levels for different mesh refinement criteria. To generate this data, simply change the value of the mesh refinement criterion variable in the main program. The results are thus (for the weight in the Kelly indicator, we have chosen the function $1/(r^2+0.1^2)$, where $r$ is the distance to the evaluation point; it can be shown that this is the optimal weight if we neglect the effects of boundaries):

Checking these numbers, we see that for global refinement, the error is proportional to $O(1/(\sqrt{N}\log N))$, and for the dual estimator $O(1/N)$. Generally speaking, we see that the dual weighted error estimator is better than the other refinement indicators, at least when compared with those that have a similarly regular behavior. The Kelly indicator produces smaller errors, but jumps about the picture rather irregularly, with the error also changing signs sometimes. Therefore, its behavior does not allow us to extrapolate the results to larger values of N. Furthermore, if we trust the error estimates of the dual weighted error estimator, the results can be improved by adding the estimated error to the computed values. In terms of reliability, the weighted estimator is thus better than the Kelly indicator, although the latter sometimes produces smaller errors.

      Evaluation of point stresses

      Besides evaluating the values of the solution at a certain point, the program also offers the possibility to evaluate the x-derivatives at a certain point, and also to tailor mesh refinement for this. To let the program compute these quantities, simply replace the two occurrences of PointValueEvaluation in the main function by PointXDerivativeEvaluation, and let the program run:

      Refinement cycle: 0
      Number of degrees of freedom=72
@@ -2636,16 +2636,16 @@

Note the asymmetry of the grids compared with those we obtained for the point evaluation. This is due to the fact that the domain and the primal solution may be symmetric about the diagonal, but the $x$-derivative is not, and the latter enters the refinement criterion.

Then, it is interesting to compare actually computed values of the quantity of interest (i.e. the x-derivative of the solution at one point) with a reference value of -0.0528223... plus or minus 0.0000005. We get this reference value by computing on a finer grid after some more mesh refinements, with approximately 130,000 cells. Recall that if the error is $O(1/N)$ in the optimal case, then taking a mesh with ten times more cells gives us one additional digit in the result.

      In the left part of the following chart, you again see the convergence of the error towards this extrapolated value, while on the right you see a comparison of true and estimated error:

After an initial phase where the true error changes its sign, the estimated error matches it quite well, again. Also note the dramatic improvement in the error when using the estimated error to correct the computed value of $J(u_h)$.

      step-13 revisited

If instead of the Exercise_2_3 data set, we choose CurvedRidges in the main function, and choose $(0.5,0.5)$ as the evaluation point, then we can redo the computations of the previous example program, to compare whether the results obtained with the help of the dual weighted error estimator are better than those we had previously.

      First, the meshes after 9 adaptive refinement cycles obtained with the point evaluation and derivative evaluation refinement criteria, respectively, look like this:

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 2024-01-30 03:04:50.960868986 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 2024-01-30 03:04:50.960868986 +0000 @@ -144,41 +144,41 @@

      Introduction

      Foreword

This program deals with an example of a non-linear elliptic partial differential equation, the minimal surface equation. You can imagine the solution of this equation to describe the surface spanned by a soap film that is enclosed by a closed wire loop. We imagine the wire to not just be a planar loop, but in fact curved. The surface tension of the soap film will then reduce the surface to have minimal surface area. The solution of the minimal surface equation describes this shape with the wire's vertical displacement as a boundary condition. For simplicity, we will here assume that the surface can be written as a graph $u=u(x,y)$ although it is clear that it is not very hard to construct cases where the wire is bent in such a way that the surface can only locally be constructed as a graph but not globally.

      Because the equation is non-linear, we can't solve it directly. Rather, we have to use Newton's method to compute the solution iteratively.

      Note
      The material presented here is also discussed in video lecture 31.5, video lecture 31.55, video lecture 31.6. (All video lectures are also available here.) (See also video lecture 31.65, video lecture 31.7.)

      Classical formulation

      In a classical sense, the problem is given in the following form:

\begin{align*}
    -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right) &= 0 \qquad
    \qquad &&\textrm{in} ~ \Omega
    \\
    u&=g \qquad\qquad &&\textrm{on} ~ \partial \Omega.
  \end{align*}

$\Omega$ is the domain we get by projecting the wire's positions into $x-y$ space. In this example, we choose $\Omega$ as the unit disk.

As described above, we solve this equation using Newton's method in which we compute the $n$th approximate solution from the $(n-1)$th one, and use a damping parameter $\alpha^n$ to get better global convergence behavior:

\begin{align*}
    F'(u^{n},\delta u^{n})&=- F(u^{n})
    \\
    u^{n+1}&=u^{n}+\alpha^n \delta u^{n}
  \end{align*}

      with

\[
    F(u) \dealcoloneq -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right)
  \]

and $F'(u,\delta u)$ the derivative of $F$ in direction of $\delta u$:

\[
   F'(u,\delta u)=\lim \limits_{\epsilon \rightarrow 0}{\frac{F(u+\epsilon \delta u)-
   F(u)}{\epsilon}}.
\]

Going through the motions to find out what $F'(u,\delta u)$ is, we find that we have to solve a linear elliptic PDE in every Newton step, with $\delta u^n$ as the solution of:

\[
   - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}\nabla
   \delta u^{n} \right) +
   \nabla \cdot \left( \frac{\nabla u^{n} \cdot
   \nabla \delta u^{n}}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{3}{2}}} \nabla u^{n}
   \right)  =
   -\left( - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}
   \nabla u^{n} \right) \right)
  \]

In order to solve the minimal surface equation, we have to solve this equation repeatedly, once per Newton step. To solve this, we have to take a look at the boundary condition of this problem. Assuming that $u^{n}$ already has the right boundary values, the Newton update $\delta u^{n}$ should have zero boundary conditions, in order to have the right boundary condition after adding both. Since in the first Newton step we are starting with the solution $u^{0}\equiv 0$, the Newton update still has to deliver the right boundary condition to the solution $u^{1}$.

Summing up, we have to solve the PDE above with the boundary condition $\delta
u^{0}=g$ in the first step and with $\delta u^{n}=0$ in all the following steps.

Note
In some sense, one may argue that if the program already implements $F(u)$, it is duplicative to also have to implement $F'(u,\delta u)$. As always, duplication tempts bugs and we would like to avoid it. While we do not explore this issue in this program, we will come back to it at the end of the Possibilities for extensions section below, and specifically in step-72.

      Weak formulation of the problem

Starting with the strong formulation above, we get the weak formulation by multiplying both sides of the PDE with a test function $\varphi$ and integrating by parts on both sides:

\[
   \left( \nabla \varphi , \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}\nabla
   \delta u^{n} \right)-\left(\nabla \varphi ,\frac{\nabla u^{n} \cdot \nabla
   \delta u^{n}}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{3}{2}}}\nabla u^{n}  \right)
   = -\left(\nabla \varphi , \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}} \nabla u^{n}
    \right).
  \]

Here the solution $\delta u^{n}$ is a function in $H^{1}(\Omega)$, subject to the boundary conditions discussed above. Reducing this space to a finite dimensional space with basis $\left\{
\varphi_{0},\dots , \varphi_{N-1}\right\}$, we can write the solution:

\[
   \delta u^{n}=\sum_{j=0}^{N-1} \delta U_{j} \varphi_{j}.
\]

Using the basis functions as test functions and defining $a_{n} \dealcoloneq \frac{1}
{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formulation:

\[
   \sum_{j=0}^{N-1}\left[ \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
   \left(\nabla u^{n}\cdot \nabla \varphi_{i} , a_{n}^{3} \nabla u^{n} \cdot \nabla
   \varphi_{j} \right) \right] \cdot \delta U_{j}=-\left( \nabla \varphi_{i} , a_{n}
   \nabla u^{n}\right) \qquad \forall i=0,\dots ,N-1,
\]

where the solution $\delta u^{n}$ is given by the coefficients $\delta U^{n}_{j}$. This linear system of equations can be rewritten as:

\[
   A^{n}\; \delta U^{n}=b^{n},
\]

where the entries of the matrix $A^{n}$ are given by:

\[
   A^{n}_{ij} \dealcoloneq \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
   \left(\nabla u^{n}\cdot \nabla \varphi_{i} , a_{n}^{3} \nabla u^{n} \cdot \nabla
   \varphi_{j} \right),
\]

and the right hand side $b^{n}$ is given by:

\[
   b^{n}_{i} \dealcoloneq -\left( \nabla \varphi_{i} , a_{n} \nabla u^{n}\right).
\]

      Questions about the appropriate solver

      The matrix that corresponds to the Newton step above can be reformulated to show its structure a bit better. Rewriting it slightly, we get that it has the form

\[
   A_{ij}
   =
   \left(
     \nabla \varphi_i,
     B
     \nabla \varphi_j
   \right),
\]

where the matrix $B$ (of size $d \times d$ in $d$ space dimensions) is given by the following expression:

\[
   B
   =
   a_n \left\{
   \mathbf 1 -
   \frac{\nabla u_n}{\sqrt{1+|\nabla u^{n}|^{2}}} \otimes
   \frac{\nabla u_n}{\sqrt{1+|\nabla u^{n}|^{2}}}
   \right\}.
\]

From this expression, it is obvious that $B$ is symmetric, and so $A$ is symmetric as well. On the other hand, $B$ is also positive definite, which confers the same property onto $A$. This can be seen by noting that the vector $v_1 =
\frac{\nabla u^n}{|\nabla u^n|}$ is an eigenvector of $B$ with eigenvalue $\lambda_1=a_n \left(1-\frac{|\nabla u^n|^2}{1+|\nabla u^n|^2}\right) > 0$ while all vectors $v_2\ldots v_d$ that are perpendicular to $v_1$ and each other are eigenvectors with eigenvalue $a_n$. Since all eigenvalues are positive, $B$ is positive definite and so is $A$. We can thus use the CG method for solving the Newton steps. (The fact that the matrix $A$ is symmetric and positive definite should not come as a surprise. It results from taking the derivative of an operator that results from taking the derivative of an energy functional: the minimal surface equation simply minimizes some non-quadratic energy. Consequently, the Newton matrix, as the matrix of second derivatives of a scalar energy, must be symmetric since the derivative with regard to the $i$th and $j$th degree of freedom should clearly commute. Likewise, if the energy functional is convex, then the matrix of second derivatives must be positive definite, and the direct calculation above simply reaffirms this.)

It is worth noting, however, that the positive definiteness degenerates for problems where $\nabla u$ becomes large. In other words, if we simply multiply all boundary values by 2, then to first order $u$ and $\nabla u$ will also be multiplied by two, but as a consequence the smallest eigenvalue of $B$ will become smaller and the matrix will become more ill-conditioned. (More specifically, for $|\nabla u^n|\rightarrow\infty$ we have that $\lambda_1 \propto a_n \frac{1}{|\nabla u^n|^2}$ whereas $\lambda_2\ldots \lambda_d=a_n$; thus, the condition number of $B$, which is a multiplicative factor in the condition number of $A$, grows like ${\cal O}(|\nabla u^n|^2)$.) It is simple to verify with the current program that indeed multiplying the boundary values used in the current program by larger and larger values results in a problem that will ultimately no longer be solvable using the simple preconditioned CG method we use here.
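
The eigenvalue claimed for $v_1$ can be verified directly from the expression for $B$ above: using $(\nabla u^n \otimes \nabla u^n)\, v_1 = (\nabla u^n \cdot v_1)\,\nabla u^n = |\nabla u^n|^2\, v_1$, one finds

\[
   B\, v_1
   = a_n \left( v_1 - \frac{|\nabla u^n|^2}{1+|\nabla u^n|^2}\, v_1 \right)
   = a_n \left( 1 - \frac{|\nabla u^n|^2}{1+|\nabla u^n|^2} \right) v_1
   = \lambda_1 v_1.
\]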

      Choice of step length and globalization

As stated above, Newton's method works by computing a direction $\delta u^n$ and then performing the update $u^{n+1} = u^{n}+\alpha^n
\delta u^{n}$ with a step length $0 < \alpha^n \le 1$. It is a common observation that for strongly nonlinear models, Newton's method does not converge if we always choose $\alpha^n=1$ unless one starts with an initial guess $u^0$ that is sufficiently close to the solution $u$ of the nonlinear problem. In practice, we don't always have such an initial guess, and consequently taking full Newton steps (i.e., using $\alpha=1$) frequently does not work.

A common strategy therefore is to use a smaller step length for the first few steps while the iterate $u^n$ is still far away from the solution $u$ and, as we get closer, use larger values for $\alpha^n$ until we can finally start to use full steps $\alpha^n=1$ as we are close enough to the solution. The question is of course how to choose $\alpha^n$. There are basically two widely used approaches: line search and trust region methods.

In this program, we simply always choose the step length equal to 0.1. This makes sure that for the testcase at hand we do get convergence although it is clear that by not eventually reverting to full step lengths we forego the rapid, quadratic convergence that makes Newton's method so appealing. Obviously, this is a point one would eventually have to address if the program were made into one that is meant to solve more realistic problems. We will comment on this issue some more in the results section, and use an even better approach in step-77.
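
In code, the fixed-step strategy described above amounts to nothing more than the following sketch (the real function in the tutorial may differ in details):

  template <int dim>
  double MinimalSurfaceProblem<dim>::determine_step_length() const
  {
    // Deliberately conservative fixed damping; a line search or trust
    // region method (see step-77) would adapt this per iteration.
    return 0.1;
  }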

      Summary of the algorithm and testcase

Overall, the program we have here is not unlike step-6 in many regards. The layout of the main class is essentially the same. On the other hand, the driving algorithm in the run() function is different and works as follows (a compact sketch of this loop is shown after the list):

1. Start with the function $u^{0}\equiv 0$ and modify it in such a way that the values of $u^0$ along the boundary equal the correct boundary values $g$ (this happens in MinimalSurfaceProblem::set_boundary_values). Set $n=0$.

2. Compute the Newton update by solving the system $A^{n}\;\delta
  U^{n}=b^{n}$ with boundary condition $\delta u^{n}=0$ on $\partial \Omega$.

3. Compute a step length $\alpha^n$. In this program, we always set $\alpha^n=0.1$. To make things easier to extend later on, this happens in a function of its own, namely in MinimalSurfaceProblem::determine_step_length. (The strategy of always choosing $\alpha^n=0.1$ is of course not optimal – we should choose a step length that works for a given search direction – but it requires a bit of work to do that. In the end, we leave these sorts of things to external packages: step-77 does that.)
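
Putting these steps together, the driving loop might look like the following sketch; the function names follow the description above, but the stopping criterion and the helper compute_residual_norm are assumptions, not necessarily the tutorial's actual code:

  template <int dim>
  void MinimalSurfaceProblem<dim>::run()
  {
    setup_system();
    set_boundary_values(); // step 1: make u^0 carry the boundary values g

    double residual_norm = std::numeric_limits<double>::max();
    while (residual_norm > 1e-3) // assumed tolerance
      {
        assemble_system(); // build A^n and b^n from the current u^n
        solve();           // step 2: A^n delta U^n = b^n, delta u^n = 0 on the boundary

        const double alpha = determine_step_length();  // step 3: here always 0.1
        current_solution.add(alpha, newton_update);    // u^{n+1} = u^n + alpha^n delta u^n

        residual_norm = compute_residual_norm();       // assumed helper
      }
  }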

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 2024-01-30 03:04:51.008869386 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 2024-01-30 03:04:51.008869386 +0000 @@ -141,7 +141,7 @@
The fine level in this mesh consists only of the degrees of freedom that are defined on the refined cells, but does not extend to that part of the domain that is not refined. While this guarantees that the overall effort grows as ${\cal O}(N)$ as necessary for optimal multigrid complexity, it leads to problems when defining where to smooth and what boundary conditions to pose for the operators defined on individual levels if the level boundary is not an external boundary. These questions are discussed in detail in the article cited above.

        The testcase

The problem we solve here is similar to step-6, with two main differences: first, the multigrid preconditioner, obviously; second, we change the discontinuity of the coefficients such that the local assembler does not look more complicated than necessary.

        The commented program

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html 2024-01-30 03:04:51.092870085 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html 2024-01-30 03:04:51.092870085 +0000 @@ -154,23 +154,23 @@

        Quasistatic elastic deformation

        Motivation of the model

        In general, time-dependent small elastic deformations are described by the elastic wave equation

\[
   \rho \frac{\partial^2 \mathbf{u}}{\partial t^2}
   + c \frac{\partial \mathbf{u}}{\partial t}
   - \textrm{div}\  ( C \varepsilon(\mathbf{u})) = \mathbf{f}
   \qquad
   \textrm{in}\ \Omega,
\]

where $\mathbf{u}=\mathbf{u} (\mathbf{x},t)$ is the deformation of the body, $\rho$ and $c$ the density and attenuation coefficient, and $\mathbf{f}$ external forces. In addition, initial conditions

\[
   \mathbf{u}(\cdot, 0) = \mathbf{u}_0(\cdot)
   \qquad
   \textrm{on}\ \Omega,
\]

        and Dirichlet (displacement) or Neumann (traction) boundary conditions need to be specified for a unique solution:

\begin{eqnarray*}
   \mathbf{u}(\mathbf{x},t) &=& \mathbf{d}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_D\subset\partial\Omega,
   \\
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N=\partial\Omega\backslash\Gamma_D.
\end{eqnarray*}

In the above formulation, $\varepsilon(\mathbf{u})= \frac 12 (\nabla \mathbf{u} + \nabla
\mathbf{u}^T)$ is the symmetric gradient of the displacement, also called the strain. $C$ is a tensor of rank 4, called the stress-strain tensor (the inverse of the compliance tensor) that contains knowledge of the elastic strength of the material; its symmetry properties make sure that it maps symmetric tensors of rank 2 (“matrices” of dimension $d$, where $d$ is the spatial dimensionality) onto symmetric tensors of the same rank. We will comment on the roles of the strain and stress tensors more below. For the moment it suffices to say that we interpret the term $\textrm{div}\  ( C \varepsilon(\mathbf{u}))$ as the vector with components $\frac \partial{\partial x_j} C_{ijkl} \varepsilon(\mathbf{u})_{kl}$, where summation over indices $j,k,l$ is implied.

The quasistatic limit of this equation is motivated as follows: each small perturbation of the body, for example by changes in boundary condition or the forcing function, will result in a corresponding change in the configuration of the body. In general, this will be in the form of waves radiating away from the location of the disturbance. Due to the presence of the damping term, these waves will be attenuated on a time scale of, say, $\tau$. Now, assume that all changes in external forcing happen on times scales that are much larger than $\tau$. In that case, the dynamic nature of the change is unimportant: we can consider the body to always be in static equilibrium, i.e. we can assume that at all times the body satisfies

\begin{eqnarray*}
   - \textrm{div}\  ( C \varepsilon(\mathbf{u})) &=& \mathbf{f}(\mathbf{x},t)
   \qquad
   \textrm{in}\ \Omega,
   \\
   \mathbf{u}(\mathbf{x},t) &=& \mathbf{d}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_D,
   \\
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N.
\end{eqnarray*}

Note that the differential equation does not contain any time derivatives any more – all time dependence is introduced through boundary conditions and a possibly time-varying force function $\mathbf{f}(\mathbf{x},t)$. The changes in configuration can therefore be considered as being stationary instantaneously. An alternative view of this is that $t$ is not really a time variable, but only a time-like parameter that governs the evolution of the problem.

        While these equations are sufficient to describe small deformations, computing large deformations is a little more complicated and, in general, leads to nonlinear equations such as those treated in step-44. In the following, let us consider some of the tools one would employ when simulating problems in which the deformation becomes large.

        Note
        The model we will consider below is not founded on anything that would be mathematically sound: we will consider a model in which we produce a small deformation, deform the physical coordinates of the body by this deformation, and then consider the next loading step again as a linear problem. This isn't consistent, since the assumption of linearity implies that deformations are infinitesimal and so moving around the vertices of our mesh by a finite amount before solving the next linear problem is an inconsistent approach. We should therefore note that it is not surprising that the equations discussed below can't be found in the literature: The model considered here has little to do with reality! On the other hand, the implementation techniques we consider are very much what one would need to use when implementing a real model, as we will see in step-44.
To come back to defining our "artificial" model, let us first introduce a tensorial stress variable $\sigma$, and write the differential equations in terms of the stress:

\begin{eqnarray*}
   - \textrm{div}\  \sigma &=& \mathbf{f}(\mathbf{x},t)
   \qquad
   \textrm{in}\ \Omega(t),
   \\
   \mathbf{u}(\mathbf{x},t) &=& \mathbf{d}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_D\subset\partial\Omega(t),
   \\
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N=\partial\Omega(t)\backslash\Gamma_D.
\end{eqnarray*}

Note that these equations are posed on a domain $\Omega(t)$ that changes with time, with the boundary moving according to the displacements $\mathbf{u}(\mathbf{x},t)$ of the points on the boundary. To complete this system, we have to specify the incremental relationship between the stress and the strain, as follows:

\[
   \dot\sigma = C \varepsilon (\dot{\mathbf{u}}),
   \qquad
   \qquad
   \textrm{[stress-strain]}
\]

where a dot indicates a time derivative. Both the stress $\sigma$ and the strain $\varepsilon(\mathbf{u})$ are symmetric tensors of rank 2.

        Time discretization

Numerically, this system is solved as follows: first, we discretize the time component using a backward Euler scheme. This leads to a discrete equilibrium of force at time step $n$:

\[
   -\textrm{div}\  \sigma^n = f^n,
\]

        where

\[
   \sigma^n = \sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n),
\]

and $\Delta \mathbf{u}^n$ the incremental displacement for time step $n$. In addition, we have to specify initial data $\mathbf{u}(\cdot,0)=\mathbf{u}_0$. This way, if we want to solve for the displacement increment, we have to solve the following system:

\begin{align*}
   - \textrm{div}\   C \varepsilon(\Delta\mathbf{u}^n) &= \mathbf{f} + \textrm{div}\  \sigma^{n-1}
   \qquad
   &&\textrm{in}\ \Omega(t_{n-1}),
   \\
   \Delta \mathbf{u}^n(\mathbf{x},t) &= \mathbf{d}(\mathbf{x},t_n) - \mathbf{d}(\mathbf{x},t_{n-1})
   \qquad
   &&\textrm{on}\ \Gamma_D,
   \\
   \mathbf{n} \ C \varepsilon(\Delta \mathbf{u}^n(\mathbf{x},t)) &= \mathbf{b}(\mathbf{x},t_n)-\mathbf{b}(\mathbf{x},t_{n-1})
   \qquad
   &&\textrm{on}\ \Gamma_N=\partial\Omega(t_{n-1})\backslash\Gamma_D.
\end{align*}

The weak form of this set of equations, which as usual is the basis for the finite element formulation, reads as follows: find $\Delta \mathbf{u}^n \in
\{v\in H^1(\Omega(t_{n-1}))^d: v|_{\Gamma_D}=\mathbf{d}(\cdot,t_n) - \mathbf{d}(\cdot,t_{n-1})\}$ such that

\begin{align*}
   (C \varepsilon(\Delta\mathbf{u}^n), \varepsilon(\varphi) )_{\Omega(t_{n-1})}
   &=
   (\mathbf{f}, \varphi)_{\Omega(t_{n-1})}
   -(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}
   +(\mathbf{b}(\mathbf x, t_n)-\mathbf{b}(\mathbf x, t_{n-1}), \varphi)_{\Gamma_N}
   +(\sigma^{n-1}\,\mathbf n, \varphi)_{\Gamma_N}
   \\
   &\qquad\qquad
   \forall \varphi \in \{\mathbf{v}\in H^1(\Omega(t_{n-1}))^d: \mathbf{v}|_{\Gamma_D}=0\}.
\end{align*}

Using that $\sigma^{n-1} \mathbf{n}
            = [C \varepsilon(\mathbf{u}^{n-1})] \mathbf{n}
            = \mathbf{b}(\mathbf x, t_{n-1})$, these equations can be simplified to

\begin{align*}
   (C \varepsilon(\Delta\mathbf{u}^n), \varepsilon(\varphi) )_{\Omega(t_{n-1})}
   &=
   (\mathbf{f}, \varphi)_{\Omega(t_{n-1})}
   -(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}
   +(\mathbf{b}(\mathbf x, t_n),\varphi)_{\Gamma_N}
   \qquad
   \qquad
   \textrm{[linear-system]}
\end{align*}

We note that, for simplicity, in the program we will always assume that there are no boundary forces, i.e. $\mathbf{b} = 0$, and that the deformation of the body is driven by body forces $\mathbf{f}$ and prescribed boundary displacements $\mathbf{d}$ alone. It is also worth noting that when integrating by parts, we would get terms of the form $(C \varepsilon(\Delta\mathbf{u}^n), \nabla \varphi
)_{\Omega(t_{n-1})}$, but that we replace them with the term involving the symmetric gradient $\varepsilon(\varphi)$ instead of $\nabla\varphi$. Due to the symmetry of $C$, the two terms are mathematically equivalent, but the symmetric version avoids the potential for round-off errors making the resulting matrix slightly non-symmetric.

The system at time step $n$, to be solved on the old domain $\Omega(t_{n-1})$, has exactly the form of a stationary elastic problem, and is therefore similar to what we have already implemented in previous example programs. We will therefore not comment on the space discretization beyond saying that we again use lowest order continuous finite elements.

        There are differences, however:

        1. We have to move (update) the mesh after each time step, in order to be able to solve the next time step on a new domain;

2. We need to know $\sigma^{n-1}$ to compute the next incremental displacement, i.e. we need to compute it at the end of the time step to make sure it is available for the next time step. Essentially, the stress variable is our window to the history of deformation of the body.

        These two operations are done in the functions move_mesh and update_quadrature_point_history in the program. While moving the mesh is only a technicality, updating the stress is a little more complicated and will be discussed in the next section.

        Updating the stress variable

As indicated above, we need to have the stress variable $\sigma^n$ available when computing time step $n+1$, and we can compute it using

\[
   \sigma^n = \sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n).
   \qquad
   \qquad
   \textrm{[stress-update]}
\]

There are, despite the apparent simplicity of this equation, two questions that we need to discuss. The first concerns the way we store $\sigma^n$: even if we compute the incremental updates $\Delta\mathbf{u}^n$ using lowest-order finite elements, then its symmetric gradient $\varepsilon(\Delta\mathbf{u}^n)$ is in general still a function that is not easy to describe. In particular, it is not a piecewise constant function, and on general meshes (with cells that are not rectangles parallel to the coordinate axes) or with non-constant stress-strain tensors $C$ it is not even a bi- or trilinear function. Thus, it is a priori not clear how to store $\sigma^n$ in a computer program.

To decide this, we have to see where it is used. The only place where we require the stress is in the term $(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}$. In practice, we of course replace this term by numerical quadrature:

\[
   (\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}
   =
   \sum_{K\subset {T}}
   \int_K \sigma^{n-1} : \varepsilon(\varphi)\, dx
   \approx
   \sum_{K\subset {T}}
   \sum_q
   w_q \ \sigma^{n-1}(\mathbf{x}_q) : \varepsilon(\varphi(\mathbf{x}_q)),
\]

where $w_q$ are the quadrature weights and $\mathbf{x}_q$ the quadrature points on cell $K$. This should make clear that what we really need is not the stress $\sigma^{n-1}$ in itself, but only the values of the stress in the quadrature points on all cells. This, however, is a simpler task: we only have to provide a data structure that is able to hold one symmetric tensor of rank 2 for each quadrature point on all cells (or, since we compute in parallel, all quadrature points of all cells that the present MPI process “owns”). At the end of each time step we then only have to evaluate $\varepsilon(\Delta \mathbf{u}^n(\mathbf{x}_q))$, multiply it by the stress-strain tensor $C$, and use the result to update the stress $\sigma^n(\mathbf{x}_q)$ at quadrature point $q$.
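
A minimal sketch of such a per-quadrature-point data structure and of the end-of-step update, under the assumption that displacement_increment_grads holds the gradients of $\Delta \mathbf{u}^n$ at the quadrature points and get_strain is a helper returning the symmetric gradient (names illustrative, not the tutorial's verbatim code):

  // One record per quadrature point, holding the stress history.
  template <int dim>
  struct PointHistory
  {
    SymmetricTensor<2, dim> old_stress;
  };

  // At the end of a time step, update the stress at the quadrature points
  // of one cell: sigma^n = sigma^{n-1} + C eps(Delta u^n).
  for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
    {
      const SymmetricTensor<2, dim> incremental_strain =
        get_strain(displacement_increment_grads[q]); // eps(Delta u^n)(x_q), assumed helper
      local_quadrature_points_history[q].old_stress +=
        stress_strain_tensor * incremental_strain;   // double contraction C : eps
    }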

The second complication is not visible in our notation as chosen above. It is due to the fact that we compute $\Delta u^n$ on the domain $\Omega(t_{n-1})$, and then use this displacement increment to both update the stress as well as move the mesh nodes around to get to $\Omega(t_n)$ on which the next increment is computed. What we have to make sure, in this context, is that moving the mesh does not only involve moving around the nodes, but also making corresponding changes to the stress variable: the updated stress is a variable that is defined with respect to the coordinate system of the material in the old domain, and has to be transferred to the new domain. The reason for this can be understood as follows: locally, the incremental deformation $\Delta\mathbf{u}$ can be decomposed into three parts, a linear translation (the constant part of the displacement increment field in the neighborhood of a point), a dilational component (that part of the gradient of the displacement field that has a nonzero divergence), and a rotation. A linear translation of the material does not affect the stresses that are frozen into it – the stress values are simply translated along. The dilational or compressional change produces a corresponding stress update. However, the rotational component does not necessarily induce a nonzero stress update (think, in 2d, for example of the situation where $\Delta\mathbf{u}=(y, -x)^T$, with which $\varepsilon(\Delta
\mathbf{u})=0$). Nevertheless, if the material was prestressed in a certain direction, then this direction will be rotated along with the material. To this end, we have to define a rotation matrix $R(\Delta \mathbf{u}^n)$ that describes, at each point, the rotation due to the displacement increments. It is not hard to see that the actual dependence of $R$ on $\Delta \mathbf{u}^n$ can only be through the curl of the displacement, rather than the displacement itself or its full gradient (as mentioned above, the constant components of the increment describe translations, its divergence the dilational modes, and the curl the rotational modes). Since the exact form of $R$ is cumbersome, we only state it in the program code; the correct updating formula for the stress variable then additionally rotates the stress computed by [stress-update] with this rotation matrix.

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 2024-01-30 03:04:51.164870685 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 2024-01-30 03:04:51.164870685 +0000 @@ -148,135 +148,135 @@

        The finite element method in general, and deal.II in particular, were invented to solve partial differential equations – in other words, to solve continuum mechanics problems. On the other hand, sometimes one wants to solve problems in which it is useful to track individual objects ("particles") and how their positions evolve. If this simply leads to a set of ordinary differential equations, for example if you want to track the positions of the planets in the solar system over time, then deal.II is clearly not the right tool. On the other hand, if this evolution is due to the interaction with the solution of partial differential equations, or if having a mesh to determine which particles interact with others (such as in the smoothed particle hydrodynamics (SPH) method), then deal.II has support for you.

        The case we will consider here is how electrically charged particles move through an electric field. As motivation, we will consider cathode rays: Electrons emitted by a heated piece of metal that is negatively charged (the "cathode"), and that are then accelerated by an electric field towards the positively charged electrode (the "anode"). The anode is typically ring-shaped so that the majority of electrons can fly through the hole in the form of an electron beam. In the olden times, they might then have illuminated the screen of a TV built from a cathode ray tube. Today, instead, electron beams are useful in X-ray machines, electron beam lithography, electron beam welding, and a number of other areas.

The equations we will then consider are as follows: First, we need to describe the electric field. This is most easily accomplished by noting that the electric potential $V$ satisfies the equation

\[
   -\epsilon_0 \Delta V = \rho
\]

where $\epsilon_0$ is the dielectric constant of vacuum, and $\rho$ is the charge density. This is augmented by boundary conditions that we will choose as follows:

\begin{align*}
   V &= -V_0 && \text{on}\; \Gamma_\text{cathode}\subset\partial\Omega \\
   V &= +V_0 && \text{on}\; \Gamma_\text{anode}\subset\partial\Omega \\
   \epsilon\frac{\partial V}{\partial n} &= 0
    && \text{on}\; \partial\Omega\setminus\Gamma_\text{cathode}\setminus\Gamma_\text{anode}.
\end{align*}

In other words, we prescribe voltages $+V_0$ and $-V_0$ at the two electrodes and insulating (Neumann) boundary conditions elsewhere. Since the dynamics of the particles are purely due to the electric field $\mathbf E=\nabla V$, we could as well have prescribed $2V_0$ and $0$ at the two electrodes – all that matters is the voltage difference at the two electrodes.

Given this electric potential $V$ and the electric field $\mathbf E=\nabla V$, we can describe the trajectory of the $i$th particle using the differential equation

\[
   m {\ddot {\mathbf x}}_i = e\mathbf E,
\]

where $m,e$ are the mass and electric charge of each particle. In practice, it is convenient to write this as a system of first-order differential equations in the position $\mathbf x$ and velocity $\mathbf v$:

\begin{align*}
   {\dot {\mathbf v}}_i &= \frac{e\mathbf E}{m}, \\
   {\dot {\mathbf x}}_i &= {\mathbf v}_i.
\end{align*}

The deal.II class we will use to deal with particles, Particles::ParticleHandler, stores particles in a way so that the position $\mathbf x_i$ is part of the Particles::ParticleHandler data structures. (It stores particles sorted by cell they are in, and consequently needs to know where each particle is.) The velocity $\mathbf v_i$, on the other hand, is of no concern to Particles::ParticleHandler and consequently we will store it as a "property" of each particle that we will update in each time step. Properties can also be used to store any other quantity we might care about each particle: its charge, or if they were larger than just an electron, its color, mass, attitude in space, chemical composition, etc.

There remain two things to discuss to complete the model: Where particles start and what the charge density $\rho$ is.

First, historically, cathode rays used very large electric fields to pull electrons out of the metal. This produces only a relatively small current. One can do better by heating the cathode: a statistical fraction of electrons in that case have enough thermal energy to leave the metal; the electric field then just has to be strong enough to pull them away from the attraction of their host body. We will model this in the following way: We will create a new particle if (i) the electric field points away from the electrode, i.e., if $\mathbf E \cdot \mathbf n < 0$ where $\mathbf n$ is the normal vector at a face pointing out of the domain (into the electrode), and (ii) the electric field exceeds a threshold value $|\mathbf E|\ge E_\text{threshold}$. This is surely not a sufficiently accurate model for what really happens, but is good enough for our current tutorial program.

        Second, in principle we would have to model the charge density via

\[
   \rho(\mathbf x) = \sum_i e\delta(\mathbf x-\mathbf x_i).
\]

The issue now is that in reality, a cathode ray tube in an old television yields a current of somewhere around a few milli-Amperes. In the much higher energy beams of particle accelerators, the current may only be a few nano-Amperes. But an Ampere is $6\times 10^{18}$ electrons flowing per second. Now, as you will see in the results section, we really only simulate a few microseconds ( $10^{-6}$ seconds), but that still results in very very large numbers of electrons – far more than we can hope to simulate with a program as small as the current one. As a consequence, let us presume that each particle represents $N$ electrons. Then the particle mass and charge are also $Nm$ and $Ne$ and the equations we have to solve are

\[
   (Nm) {\ddot {\mathbf x}}_i = (Ne)\mathbf E,
\]

which is of course exactly the same as above after dividing both sides by $N$. On the other hand, the charge density for these "clumps" of electrons is given by

\[
   \rho(\mathbf x) = \sum_i (Ne)\delta(\mathbf x-\mathbf x_i).
\]

It is this form that we will implement in the program, where $N$ is chosen rather large in the program to ensure that the particles actually affect the electric field. (This may not be realistic in practice: In most cases, there are just not enough electrons to actually affect the overall electric field. But realism is not our goal here.)
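
In finite element terms, each delta function simply contributes $(Ne)\,\varphi_i(\mathbf x_p)$ to the $i$th entry of the right hand side of the Poisson problem, since $(\delta(\cdot-\mathbf x_p),\varphi_i)=\varphi_i(\mathbf x_p)$. A sketch of such a deposit loop follows; the variable names N_electrons, electron_charge, and system_rhs are assumptions used for illustration, not the tutorial's actual code:

  std::vector<types::global_dof_index> local_dof_indices(fe.n_dofs_per_cell());
  for (const auto &cell : dof_handler.active_cell_iterators())
    for (const auto &particle : particle_handler.particles_in_cell(cell))
      {
        // Shape functions are cheapest to evaluate at the particle's
        // reference-cell coordinates, which the particle handler stores.
        const Point<dim> reference_location = particle.get_reference_location();

        cell->get_dof_indices(local_dof_indices);
        for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
          system_rhs(local_dof_indices[i]) +=
            N_electrons * electron_charge * fe.shape_value(i, reference_location);
      }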

As a final thought about the model, one may wonder why the equation for the electric field (or, rather, the electric potential) has no time derivative whereas the equations for the electron positions do. In essence, this is a modeling assumption: We assume that the particles move so slowly that at any given time the electric field is in equilibrium. This is saying, in other words, that the velocity of the electrons is much less than the speed of light. In yet other words, we can rephrase this in terms of the electrode voltage $V_0$: Since every volt of electric potential accelerates electrons by approximately 600 km/s (neglecting relativistic effects), requiring $\|\mathbf v_i\|\ll c$ is equivalent to saying that $2V_0 \ll 500 \text{V}$. Under this assumption (and the assumption that the total number of electrons is small), one can also neglect the creation of magnetic fields by the moving charges, which would otherwise also affect the movement of the electrons.

        Time discretization

        The equations outlined above then form a set of coupled differential equations. Let us bring them all together in one place again to make that clear:

\begin{align*}
   -\epsilon_0 \Delta V &= \sum_i e\delta(\mathbf x-\mathbf x_i)
   \\
   {\dot {\mathbf x}}_i &= {\mathbf v}_i,
   \\
   {\dot {\mathbf v}}_i &= \frac{e\mathbf E}{m} = \frac{e\nabla V}{m}.
\end{align*}

        Because of the awkward dependence of the electric potential on the particle locations, we don't want to solve this as a coupled system but instead use a decoupled approach where we first solve for the potential in each time step and then the particle locations. (One could also do it the other way around, of course.) This is very much in the same spirit as we do in step-21, step-31, and step-32, to name just a few, and can all be understood in the context of the operator splitting methods discussed in step-58.


        So, if we denote by an upper index $n$ the time step, and if we use a simple time discretization for the ODE, then this means that we have to solve the following set of equations in each time step:


\begin{align*}
   -\epsilon_0 \Delta V^{(n)} &= \sum_i e\delta(\mathbf x-\mathbf x_i^{(n-1)})
   \\
   \frac{{\mathbf v}_i^{(n)}-{\mathbf v}_i^{(n-1)}}{\Delta t} &= \frac{e\nabla V^{(n)}}{m}
   \\
   \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} &= {\mathbf v}_i^{(n)}.
\end{align*}


        This scheme can be understood in the framework of operator splitting methods (specifically, the "Lie splitting" method) wherein a coupled system is solved by updating one variable at a time, using either the old values of other variables (e.g., using $\mathbf x_i^{(n-1)}$ in the first equation) or the values of variables that have already been updated in a previous sub-step (e.g., using $V^{(n)}$ in the second equation). There are of course many better ways to do a time discretization (for example the simple leapfrog scheme when updating the velocity, or more general Strang splitting methods for the coupled system) but this isn't the point of the tutorial program, and so we will be content with what we have here. (We will comment on a piece of this puzzle in the possibilities for extensions section of this program, however.)

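        In code, one pass of this splitting scheme might look like the following sketch; all function names here are hypothetical stand-ins, not functions of this program:

        while (time < end_time)
          {
            solve_field_problem();                 // -eps_0 Laplace V^(n) = sum_i e delta(x - x_i^(n-1))
            const double dt = compute_time_step(); // chosen after the field solve, see below
            update_particle_velocities(dt);        // v^(n) = v^(n-1) + dt (e/m) grad V^(n)
            move_particles(dt);                    // x^(n) = x^(n-1) + dt v^(n)
            time += dt;
          }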

        There remains the question of how we should choose the time step size $\Delta t$. The limitation here is that the Particles::ParticleHandler class needs to keep track of which cell each particle is in. This is particularly an issue if we are running computations in parallel (say, in step-70) because in that case every process only stores those cells it owns plus one layer of "ghost cells". That's not relevant here, but in general we should make sure that over the course of each time step, a particle moves only from one cell to any of its immediate neighbors (face, edge, or vertex neighbors). If we can ensure that, then Particles::ParticleHandler is guaranteed to be able to figure out which cell a particle ends up in. To do this, a useful rule of thumb is that we should choose the time step so that for all particles the expected distance the particle moves by is less than one cell diameter:

\[
   \Delta t \le \frac{h_i}{\|\mathbf v_i\|} \qquad\qquad \forall i,
\]

        or equivalently

\[
   \Delta t \le \min_i \frac{h_i}{\|\mathbf v_i\|}.
\]


        Here, $h_i$ is the length of the shortest edge of the cell on which particle $i$ is located – in essence, a measure of the size of a cell.


        On the other hand, a particle might already be at the boundary of one cell and the neighboring cell might be once further refined. So then the time to cross that neighboring cell would actually be half the amount above, suggesting

\[
   \Delta t \le \min_i \frac{\tfrac 12 h_i}{\|\mathbf v_i\|}.
\]

        But even that is not good enough: The formula above updates the particle positions in each time step using the formula

\[
   \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} = {\mathbf v}_i^{(n)},
\]


        that is, using the current velocity ${\mathbf v}_i^{(n)}$. But we don't have the current velocity yet at the time when we need to choose $\Delta t$ – which is after we have updated the potential $V^{(n)}$ but before we update the velocity from ${\mathbf v}_i^{(n-1)}$ to ${\mathbf v}_i^{(n)}$. All we have is ${\mathbf v}_i^{(n-1)}$. So we need an additional safety factor for our final choice:


\[
   \Delta t^{(n)} =
   c_\text{safety} \min_i \frac{\tfrac 12 h_i}{\|\mathbf v_i^{(n-1)}\|}.
\]


        How large should $c_\text{safety}$ be? That depends on how much of an underestimate $\|\mathbf v_i^{(n-1)}\|$ might be compared to $\|\mathbf v_i^{(n)}\|$, and that is actually quite easy to assess: A particle created in one time step with zero velocity will roughly pick up equal velocity increments in each successive time step if the electric field it encounters along the way were roughly constant. So the maximal difference between $\|\mathbf v_i^{(n-1)}\|$ and $\|\mathbf v_i^{(n)}\|$ would be a factor of two. As a consequence, we will choose $c_\text{safety}=0.5$.

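        In deal.II-style C++, this criterion might be implemented along the following lines (a sketch only: velocity_of() is a hypothetical accessor for the velocity stored with each particle, and c_safety is the factor of 0.5 argued for above):

        double dt = std::numeric_limits<double>::max();
        for (const auto &particle : particle_handler)
          {
            // h_i: the shortest edge of the cell the particle sits on
            const double h = particle.get_surrounding_cell()->minimum_vertex_distance();
            const double v = velocity_of(particle).norm();
            if (v > 0.)
              dt = std::min(dt, c_safety * 0.5 * h / v);
          }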

        There is only one other case we ought to consider: What happens in the very first time step? There, any particles to be moved along have just been created, but they have a zero velocity. So we don't know what velocity we should choose for them. Of course, in all other time steps there are also particles that have just been created, but in general, the particles with the highest velocity limit the time step size and so the newly created particles with their zero velocity don't matter. But what if we only have such particles?


        In that case, we can use the following approximation: If a particle starts at $\mathbf v^{(0)}=0$, then the update formula tells us that


\[
   {\mathbf v}_i^{(1)} = \frac{e\nabla V^{(1)}}{m} \Delta t,
\]

        and consequently

\[
   \frac{{\mathbf x}_i^{(1)}-{\mathbf x}_i^{(0)}}{\Delta t} = {\mathbf v}_i^{(1)},
\]

        which we can write as

\[
   {\mathbf x}_i^{(1)} - {\mathbf x}_i^{(0)} = \frac{e\nabla V^{(1)}}{m} \Delta t^2.
\]


        Not wanting to move a particle by more than $\frac 12 h_i$ then implies that we should choose the time step as


\[
   \Delta t
   \le
   \min_i
   \sqrt{ \frac{h_i m}{e \|\nabla V^{(1)}\|} }.
\]

        Using the same argument about neighboring cells possibly being smaller by a factor of two then leads to the final formula for time step zero:

\[
   \Delta t
   =
   \min_i
   \sqrt{ \frac{\frac 12 h_i m}{e \|\nabla V^{(1)}\|} }.
\]


        Strictly speaking, we would have to evaluate the electric potential $V^{(1)}$ at the location of each particle, but a good enough approximation is to use the maximum of the values at the vertices of the respective cell. (Why the vertices and not the midpoint? Because the gradient of the solution of the Laplace equation, i.e., the electric field, is largest in corner singularities which are located at the vertices of cells.) This has the advantage that we can make good use of the FEValues functionality which can recycle pre-computed material as long as the quadrature points are the same from one cell to the next.


        We could always run this kind of scheme to estimate the difference between $\mathbf v_i^{(n-1)}$ and $\mathbf v_i^{(n)}$, but it relies on evaluating the electric field $\mathbf E$ on each cell, and that is expensive. As a consequence, we will limit this approach to the very first time step.

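        A sketch of this vertex-based estimate for the very first time step (under assumed names: solution holds $V^{(1)}$, and electron_mass and electron_charge are the physical constants; QTrapezoid places its quadrature points exactly at the cell vertices):

        QTrapezoid<dim> vertex_quadrature;
        FEValues<dim>   fe_values(fe, vertex_quadrature, update_gradients);
        std::vector<Tensor<1, dim>> grad_V(vertex_quadrature.size());

        double dt = std::numeric_limits<double>::max();
        for (const auto &cell : dof_handler.active_cell_iterators())
          {
            fe_values.reinit(cell);
            fe_values.get_function_gradients(solution, grad_V);

            double max_field = 0;
            for (const auto &g : grad_V)
              max_field = std::max(max_field, g.norm());

            // dt = sqrt( (1/2 h_i m) / (e |grad V^(1)|) ), minimized over cells
            if (max_field > 0)
              dt = std::min(dt,
                            std::sqrt(0.5 * cell->minimum_vertex_distance() *
                                      electron_mass /
                                      (electron_charge * max_field)));
          }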

        Spatial discretization


        Having discussed the time discretization, the discussion of the spatial discretization is going to be short: We use quadratic finite elements, i.e., the space $Q_2$, to approximate the electric potential $V$. The mesh is adapted a couple of times during the initial time step. All of this is entirely standard if you have read step-6, and the implementation does not provide for any kind of surprise.


        Dealing with particles programmatically

        Adding and moving particles is, in practice, not very difficult in deal.II. To add one, the create_particles() function of this program simply uses a code snippet of the following form:

        new_particle.set_location(location);
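        In context, this line is part of a longer sequence; a plausible reconstruction based on the member functions referenced in the documentation (the variables location, cell, mapping, and next_unused_particle_id are assumptions standing in for the program's actual names) is:

        Particles::Particle<dim> new_particle;
        new_particle.set_location(location);
        new_particle.set_reference_location(
          mapping.transform_real_to_unit_cell(cell, location));
        new_particle.set_id(next_unused_particle_id);
        particle_handler.insert_particle(new_particle, cell);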
/usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 2024-01-30 03:04:51.196870952 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 2024-01-30 03:04:51.196870952 +0000

      Introduction

      Note
      The material presented here is also discussed in video lecture 9. (All video lectures are also available here.)

      The finite element method is based on approximating the solution $u$ of a differential equation such as $-\Delta u=f$ by a function $u_h$ that is "piecewise" polynomial; that is, we subdivide the domain $\Omega$ on which the equation is posed into small cells that in the documentation we will generally denote by the symbol $K$. On each cell $K$, the approximating function $u_h$ we seek is then a polynomial. (Or, strictly speaking, a function that is the image of a polynomial from a "reference cell", but let us not make things more complicated than necessary for now.)


      In the previous tutorial program (in step-1), we showed how we should think of the subdivision of the domain into cells as a "mesh" represented by the Triangulation class, and what this looks like in code. In the current tutorial program, we now show how one represents piecewise polynomial functions through the concept of degrees of freedom defined on this mesh. For this example, we will use the lowest order ( $Q_1$) finite elements, that is the approximating function $u_h$ we are looking for will be "bi-linear" on each quadrilateral cell $K$ of the mesh. (They would be linear if we were to work on triangles.)


      In practice, we represent the function as a linear combination of shape functions $\varphi_j(\mathbf x)$ with multipliers $U_j$ that we call the "degrees of freedom". For the bi-linear functions we consider here, each of these shape functions and degrees of freedom is associated with a vertex of the mesh. Later examples will demonstrate higher order elements where degrees of freedom are not necessarily associated with vertices any more, but can be associated with edges, faces, or cells.


      The term "degree of freedom" is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf x) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf x)$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problems is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in V_h$). In other words, all we say here is that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf x)$ we have used above in the expansion of $u_h(\mathbf x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh.


      Enumerating degrees of freedom


      Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher order elements, one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. In other words, the enumeration of degrees of freedom is an entirely separate thing from the indices we use for vertices. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler.


      Defining degrees of freedom ("DoF"s in short) on a mesh is, in practice, a rather simple task, since the library does all the work for you. Essentially, all you have to do is create a finite element object (from one of the many finite element classes deal.II already has, see for example the Finite element space descriptions documentation) and give it to a DoFHandler object through the DoFHandler::distribute_dofs() function ("distributing DoFs" is the term we use to describe the process of enumerating the basis functions as discussed above). The DoFHandler is a class that knows which degrees of freedom live where, i.e., it can answer questions like "how many degrees of freedom are there globally" and "on this cell, give me the global indices of the shape functions that live here". This is the sort of information you need when determining how big your system matrix should be, and when copying the contributions of a single cell into the global matrix.
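      A minimal sketch of these steps, assuming an existing Triangulation<2> object called triangulation:

      #include <deal.II/dofs/dof_handler.h>
      #include <deal.II/fe/fe_q.h>
      #include <iostream>

      FE_Q<2>       fe(1);             // lowest order: one DoF per vertex
      DoFHandler<2> dof_handler(triangulation);
      dof_handler.distribute_dofs(fe); // enumerate the basis functions

      std::cout << "Number of degrees of freedom: "
                << dof_handler.n_dofs() << std::endl;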

      The first task of the current program is therefore to take a mesh and a finite element, and enumerate the degrees of freedom. In the current context, this means simply giving each vertex of the mesh a DoF index. Once that has happened, we will output in a picture which vertex ended up with which DoF index. You can find the corresponding pictures in the results section of this tutorial.


      The next step would then be to compute a matrix and right hand side corresponding to a particular differential equation using this finite element and mesh. We will keep this step for the step-3 program and rather talk about one practical aspect of a finite element program, namely that finite element matrices are always very sparse: almost all entries in these matrices are zero.

      To be more precise, we say that a matrix is sparse if the number of nonzero entries per row in the matrix is bounded by a number that is independent of the overall number of degrees of freedom. For example, the simple 5-point stencil of a finite difference approximation of the Laplace equation leads to a sparse matrix since the number of nonzero entries per row is five, and therefore independent of the total size of the matrix. For more complicated problems – say, the Stokes problem of step-22 – and in particular in 3d, the number of entries per row may be several hundred. But the important point is that this number is independent of the overall size of the problem: If you refine the mesh, the maximal number of unknowns per row remains the same.

      Sparsity is one of the distinguishing features of the finite element method compared to, say, approximating the solution of a partial differential equation using a Taylor expansion and matching coefficients, or using a Fourier basis.


      In practical terms, it is the sparsity of matrices that enables us to solve problems with millions or billions of unknowns. To understand this, note that a matrix with $N$ rows, each with a fixed upper bound for the number of nonzero entries, requires ${\cal O}(N)$ memory locations for storage, and a matrix-vector multiplication also requires only ${\cal O}(N)$ operations. Consequently, if we had a linear solver that requires only a fixed number of matrix-vector multiplications to come up with the solution of a linear system with this matrix, then we would have a solver that can find the values of all $N$ unknowns with optimal complexity, i.e., with a total of ${\cal O}(N)$ operations. It is clear that this wouldn't be possible if the matrix were not sparse (because then the number of entries in the matrix would have to be ${\cal O}(N^s)$ with some $s>1$, and doing a fixed number of matrix-vector products would take ${\cal O}(N^s)$ operations), but it also requires very specialized solvers such as multigrid methods to satisfy the requirement that the solution requires only a fixed number of matrix-vector multiplications. We will frequently look at the question of what solver to use in the remaining programs of this tutorial.


      The sparsity is generated by the fact that finite element shape functions are defined locally on individual cells, rather than globally, and that the local differential operators in the bilinear form only couple shape functions whose support overlaps. (The "support" of a function is the area where it is nonzero. For the finite element method, the support of a shape function is generally the cells adjacent to the vertex, edge, or face it is defined on.) In other words, degrees of freedom $i$ and $j$ that are not defined on the same cell do not overlap, and consequently the matrix entry $A_{ij}$ will be zero. (In some cases such as the Discontinuous Galerkin method, shape functions may also connect to neighboring cells through face integrals. But finite element methods do not generally couple shape functions beyond the immediate neighbors of a cell on which the function is defined.)


      How degrees of freedom are enumerated

      By default, the DoFHandler class enumerates degrees of freedom on a mesh using an algorithm that is difficult to describe and leads to results that do look right if you know what it is doing but otherwise appears rather random; consequently, the sparsity pattern is also not optimized for any particular purpose. To show this, the code below will demonstrate a simple way to output the "sparsity pattern" that corresponds to a DoFHandler, i.e., an object that represents all of the potentially nonzero elements of a matrix one may build when discretizing a partial differential equation on a mesh and its DoFHandler. This lack of structure in the sparsity pattern will be apparent from the pictures we show below.
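      Producing such a picture takes only a few lines of deal.II code; a sketch of the approach (the output file name is arbitrary):

      DynamicSparsityPattern dsp(dof_handler.n_dofs());
      DoFTools::make_sparsity_pattern(dof_handler, dsp);

      SparsityPattern sparsity_pattern;
      sparsity_pattern.copy_from(dsp);

      std::ofstream out("sparsity_pattern.svg");
      sparsity_pattern.print_svg(out);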


      For most applications and algorithms, the exact way in which degrees of freedom are numbered does not matter. For example, the Conjugate Gradient method we use to solve linear systems does not care. On the other hand, some algorithms do care: in particular, some preconditioners such as SSOR will work better if they can walk through degrees of freedom in a particular order, and it would be nice if we could just sort them in such a way that SSOR can iterate through them from zero to $N$ in this order. Other examples include computing incomplete LU or Cholesky factorizations, or if we care about the block structure of matrices (see step-20 for an example). deal.II therefore has algorithms that can re-enumerate degrees of freedom in particular ways in namespace DoFRenumbering. Renumbering can be thought of as choosing a different, permuted basis of the finite element space. The sparsity pattern and matrices that result from this renumbering are therefore also simply a permutation of rows and columns compared to the ones we would get without explicit renumbering.


      In the program below, we will use the algorithm of Cuthill and McKee to do so. We will show the sparsity pattern for both the original enumeration of degrees of freedom and of the renumbered version below, in the results section.

      The commented program

      The first few includes are just like in the previous program, so do not require additional comments:

       

      Renumbering of DoFs

      In the sparsity pattern produced above, the nonzero entries extended quite far off from the diagonal. For some algorithms, for example for incomplete LU decompositions or Gauss-Seidel preconditioners, this is unfavorable, and we will show a simple way how to improve this situation.


      Remember that for an entry $(i,j)$ in the matrix to be nonzero, the supports of the shape functions i and j needed to intersect (otherwise in the integral, the integrand would be zero everywhere since at every point either the one or the other shape function is zero). However, the supports of shape functions intersected only if they were adjacent to each other, so in order to have the nonzero entries clustered around the diagonal (where $i$ equals $j$), we would like adjacent shape functions to be numbered with indices (DoF numbers) that differ not too much.


      This can be accomplished by a simple front marching algorithm, where one starts at a given vertex and gives it the index zero. Then, its neighbors are numbered successively, making their indices close to the original one. Then, their neighbors, if not yet numbered, are numbered, and so on.

      One algorithm that adds a little bit of sophistication along these lines is the one by Cuthill and McKee. We will use it in the following function to renumber the degrees of freedom such that the resulting sparsity pattern is more localized around the diagonal. The only interesting part of the function is the first call to DoFRenumbering::Cuthill_McKee, the rest is essentially as before:

        void renumber_dofs(DoFHandler<2> &dof_handler)
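      A plausible body for this function, sketched from the description above rather than copied from the tutorial (the output file name is an assumption):

        void renumber_dofs(DoFHandler<2> &dof_handler)
        {
          DoFRenumbering::Cuthill_McKee(dof_handler);

          DynamicSparsityPattern dsp(dof_handler.n_dofs());
          DoFTools::make_sparsity_pattern(dof_handler, dsp);

          SparsityPattern sparsity_pattern;
          sparsity_pattern.copy_from(dsp);

          std::ofstream out("sparsity_pattern_renumbered.svg");
          sparsity_pattern.print_svg(out);
        }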
/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 2024-01-30 03:04:51.264871518 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 2024-01-30 03:04:51.264871518 +0000

      \begin{eqnarray*}
   -\nabla \cdot K({\mathbf x}) \nabla p &=& f \qquad {\textrm{in}\ } \Omega, \\
   p &=& g \qquad {\textrm{on}\ }\partial\Omega.
 \end{eqnarray*}


      $K({\mathbf x})$ is assumed to be uniformly positive definite, i.e., there is $\alpha>0$ such that the eigenvalues $\lambda_i({\mathbf x})$ of $K(x)$ satisfy $\lambda_i({\mathbf x})\ge \alpha$. The use of the symbol $p$ instead of the usual $u$ for the solution variable will become clear in the next section.


      After discussing the equation and the formulation we are going to use to solve it, this introduction will cover the use of block matrices and vectors, the definition of solvers and preconditioners, and finally the actual test case we are going to solve.

      We are going to extend this tutorial program in step-21 to solve not only the mixed Laplace equation, but add another equation that describes the transport of a mixture of two fluids.

      The equations covered here fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems module.

      The equations

      In the form above, the Poisson equation (i.e., the Laplace equation with a nonzero right hand side) is generally considered a good model equation for fluid flow in porous media. Of course, one typically models fluid flow through the Navier-Stokes equations or, if fluid velocities are slow or the viscosity is large, the Stokes equations (which we cover in step-22). In the first of these two models, the forces that act are inertia and viscous friction, whereas in the second it is only viscous friction – i.e., forces that one fluid particle exerts on a nearby one. This is appropriate if you have free flow in a large domain, say a pipe, a river, or in the air. On the other hand, if the fluid is confined in pores, then friction forces exerted by the pore walls on the fluid become more and more important and internal viscous friction becomes less and less important. Modeling this then first leads to the Brinkman model if both effects are important, and in the limit of very small pores to the Darcy equations. The latter is just a different name for the Poisson or Laplace equation, connotating it with the area to which one wants to apply it: slow flow in a porous medium. In essence it says that the velocity is proportional to the negative pressure gradient that drives the fluid through the porous medium.


      The Darcy equation models this pressure that drives the flow. (Because the solution variable is a pressure, we here use the name $p$ instead of the name $u$ more commonly used for the solution of partial differential equations.) Typical applications of this view of the Laplace equation are then modeling groundwater flow, or the flow of hydrocarbons in oil reservoirs. In these applications, $K$ is the permeability tensor, i.e., a measure for how much resistance the soil or rock matrix asserts on the fluid flow.


      In the applications named above, a desirable feature for a numerical scheme is that it should be locally conservative, i.e., that whatever flows into a cell also flows out of it (or the difference is equal to the integral over the source terms over each cell, if the sources are nonzero). However, as it turns out, the usual discretizations of the Laplace equation (such as those used in step-3, step-4, or step-6) do not satisfy this property. But, one can achieve this by choosing a different formulation of the problem and a particular combination of finite element spaces.

      Formulation, weak form, and discrete problem

      To this end, one first introduces a second variable, called the velocity, ${\mathbf u}=-K\nabla p$. By its definition, the velocity is a vector in the negative direction of the pressure gradient, multiplied by the permeability tensor. If the permeability tensor is proportional to the unit matrix, this equation is easy to understand and intuitive: the higher the permeability, the higher the velocity; and the velocity is proportional to the gradient of the pressure, going from areas of high pressure to areas of low pressure (thus the negative sign).


      Here, ${\mathbf n}$ is the outward normal vector at the boundary. Note how in this formulation, Dirichlet boundary values of the original problem are incorporated in the weak form.


      To be well-posed, we have to look for solutions and test functions in the space $H({\textrm{div}})=\{{\mathbf w}\in L^2(\Omega)^d:\ {\textrm{div}}\ {\mathbf w}\in L^2\}$ for $\mathbf u$, $\mathbf v$, and $L^2$ for $p,q$. It is a well-known fact stated in almost every book on finite element theory that if one chooses discrete finite element spaces for the approximation of ${\mathbf u},p$ inappropriately, then the resulting discrete problem is unstable and the discrete solution will not converge to the exact solution. (Some details on the problem considered here – which falls in the class of "saddle-point problems" – can be found on the Wikipedia page on the Ladyzhenskaya-Babuska-Brezzi (LBB) condition.)


      To overcome this, a number of different finite element pairs for ${\mathbf u},p$ have been developed that lead to a stable discrete problem. One such pair is to use the Raviart-Thomas spaces $RT(k)$ for the velocity ${\mathbf u}$ and discontinuous elements of class $DQ(k)$ for the pressure $p$. For details about these spaces, we refer in particular to the book on mixed finite element methods by Brezzi and Fortin, but many other books on the theory of finite elements, for example the classic book by Brenner and Scott, also state the relevant results. In any case, with appropriate choices of function spaces, the discrete formulation reads as follows: Find ${\mathbf u}_h,p_h$ so that

      \begin{eqnarray*}
   A(\{{\mathbf u}_h,p_h\},\{{\mathbf v}_h,q_h\}) = F(\{{\mathbf v}_h,q_h\})
   \qquad\qquad \forall {\mathbf v}_h,q_h.
 \end{eqnarray*}


      Before continuing, let us briefly pause and show that the choice of function spaces above provides us with the desired local conservation property. In particular, because the pressure space consists of discontinuous piecewise polynomials, we can choose the test function $q$ as the function that is equal to one on any given cell $K$ and zero everywhere else. If we also choose ${\mathbf v}=0$ everywhere (remember that the weak form above has to hold for all discrete test functions $q,v$), then putting these choices of test functions into the weak formulation above implies in particular that


      \begin{eqnarray*}
   - (1,{\textrm{div}}\ {\mathbf u}_h)_K
   =
   -(1,f)_K,
 \end{eqnarray*}

      If you now recall that ${\mathbf u}$ was the velocity, then the integral on the left is exactly the (discrete) flux across the boundary of the cell $K$. The statement is then that the flux must be equal to the integral over the sources within $K$. In particular, if there are no sources (i.e., $f=0$ in $K$), then the statement is that total flux is zero, i.e., whatever flows into a cell must flow out of it through some other part of the cell boundary. This is what we call local conservation because it holds for every cell.
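      To make the flux interpretation explicit (a standard intermediate step, spelled out here for clarity): cancelling the signs and applying the divergence theorem yields

      \begin{eqnarray*}
   \int_K {\textrm{div}}\ {\mathbf u}_h
   = \int_{\partial K} {\mathbf u}_h \cdot {\mathbf n}
   = \int_K f,
 \end{eqnarray*}

      i.e., the flux through the boundary of $K$ equals the integral of the sources within $K$.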


      On the other hand, the usual continuous $Q_k$ elements would not result in this kind of property when used for the pressure (as, for example, we do in step-43) because one can not choose a discrete test function $q_h$ that is one on a cell $K$ and zero everywhere else: It would be discontinuous and consequently not in the finite element space. (Strictly speaking, all we can say is that the proof above would not work for continuous elements. Whether these elements might still result in local conservation is a different question as one could think that a different kind of proof might still work; in reality, however, the property really does not hold.)


      Assembling the linear system

      The deal.II library (of course) implements Raviart-Thomas elements $RT(k)$ of arbitrary order $k$, as well as discontinuous elements $DG(k)$. If we forget about their particular properties for a second, we then have to solve a discrete problem

      \begin{eqnarray*}
   A(x_h,w_h) = F(w_h),
 \end{eqnarray*}


      with the bilinear form and right hand side as stated above, and $x_h=\{{\mathbf u}_h,p_h\}$, $w_h=\{{\mathbf v}_h,q_h\}$. Both $x_h$ and $w_h$ are from the space $X_h=RT(k)\times DQ(k)$, where $RT(k)$ is itself a space of $dim$-dimensional functions to accommodate for the fact that the flow velocity is vector-valued. The necessary question then is: how do we do this in a program?


      Vector-valued elements have already been discussed in previous tutorial programs, the first time and in detail in step-8. The main difference there was that the vector-valued space $V_h$ is uniform in all its components: the $dim$ components of the displacement vector are all equal and from the same function space. What we could therefore do was to build $V_h$ as the outer product of the $dim$ times the usual $Q(1)$ finite element space, and by this make sure that all our shape functions have only a single non-zero vector component. Instead of dealing with vector-valued shape functions, all we did in step-8 was therefore to look at the (scalar) only non-zero component and use the fe.system_to_component_index(i).first call to figure out which component this actually is.


      This doesn't work with Raviart-Thomas elements: following from their construction to satisfy certain regularity properties of the space $H({\textrm{div}})$, the shape functions of $RT(k)$ are usually nonzero in all their vector components at once. For this reason, were fe.system_to_component_index(i).first applied to determine the only nonzero component of shape function $i$, an exception would be generated. What we really need to do is to get at all vector components of a shape function. In deal.II diction, we call such finite elements non-primitive, whereas finite elements that are either scalar or for which every vector-valued shape function is nonzero only in a single vector component are called primitive.


      So what do we have to do for non-primitive elements? To figure this out, let us go back in the tutorial programs, almost to the very beginnings. There, we learned that we use the FEValues class to determine the values and gradients of shape functions at quadrature points. For example, we would call fe_values.shape_value(i,q_point) to obtain the value of the ith shape function on the quadrature point with number q_point. Later, in step-8 and other tutorial programs, we learned that this function call also works for vector-valued shape functions (of primitive finite elements), and that it returned the value of the only non-zero component of shape function i at quadrature point q_point.

      For non-primitive shape functions, this is clearly not going to work: there is no single non-zero vector component of shape function i, and the call to fe_values.shape_value(i,q_point) would consequently not make much sense. However, deal.II offers a second function call, fe_values.shape_value_component(i,q_point,comp) that returns the value of the compth vector component of shape function i at quadrature point q_point, where comp is an index between zero and the number of vector components of the present finite element; for example, the element we will use to describe velocities and pressures is going to have $dim+1$ components. It is worth noting that this function call can also be used for primitive shape functions: it will simply return zero for all components except one; for non-primitive shape functions, it will in general return a non-zero value for more than just one component.

      We could now attempt to rewrite the bilinear form above in terms of vector components. For example, in 2d, the first term could be rewritten like this (note that $u_0=x_0, u_1=x_1, p=x_2$):


      fe_values.shape_value_component(j,q,1)
      ) *
      fe_values.JxW(q);

      This is, at best, tedious, error prone, and not dimension independent. There are obvious ways to make things dimension independent, but in the end, the code is simply not pretty. What would be much nicer is if we could simply extract the ${\mathbf u}$ and $p$ components of a shape function $x_h^i$. In the program we do that in the following way:


      This is, in fact, not only the first term of the bilinear form, but the whole thing (sans boundary contributions).


      What this piece of code does is, given an fe_values object, to extract the values of the first $dim$ components of shape function i at quadrature points q, that is the velocity components of that shape function. Put differently, if we write shape functions $x_h^i$ as the tuple $\{{\mathbf u}_h^i,p_h^i\}$, then the function returns the velocity part of this tuple. Note that the velocity is of course a dim-dimensional tensor, and that the function returns a corresponding object. Similarly, where we subscript with the pressure extractor, we extract the scalar pressure component. The whole mechanism is described in more detail in the Handling vector valued problems module.

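      The extractor-based assembly that the preceding paragraphs describe might look roughly like this (a sketch, not the program's verbatim code; local_matrix, k_inverse_values, n_q_points, and dofs_per_cell are assumed names):

      const FEValuesExtractors::Vector velocities(0);
      const FEValuesExtractors::Scalar pressure(dim);

      for (unsigned int q = 0; q < n_q_points; ++q)
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          for (unsigned int j = 0; j < dofs_per_cell; ++j)
            local_matrix(i, j) +=
              (fe_values[velocities].value(i, q) *        // u_i
                 k_inverse_values[q] *                    // K^{-1}
                 fe_values[velocities].value(j, q)        // u_j
               - fe_values[velocities].divergence(i, q) * // div v
                   fe_values[pressure].value(j, q)        // p
               - fe_values[pressure].value(i, q) *        // q
                   fe_values[velocities].divergence(j, q)) * // div u
              fe_values.JxW(q);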

      In practice, it turns out that we can do a bit better if we evaluate the shape functions, their gradients and divergences only once per outermost loop, and store the result, as this saves us a few otherwise repeated computations (it is possible to save even more repeated operations by calculating all relevant quantities in advance and then only inserting the results in the actual loop, see step-22 for a realization of that approach). The final result then looks like this, working in every space dimension:

      for (const auto &cell : dof_handler.active_cell_iterators())
      {

      You will find the exact same code as above in the sources for the present program. We will therefore not comment much on it below.

      Linear solvers and preconditioners

      After assembling the linear system we are faced with the task of solving it. The problem here is that the matrix possesses two undesirable properties:

      • It is indefinite, i.e., it has both positive and negative eigenvalues. We don't want to prove this property here, but note that this is true for all matrices of the form $\left(\begin{array}{cc} M & B \\ B^T & 0 \end{array}\right)$ such as the one here where $M$ is positive definite.
      • The matrix has a zero block at the bottom right (there is no term in the bilinear form that couples the pressure $p$ with the pressure test function $q$).

      At least it is symmetric, but the first issue above still means that the Conjugate Gradient method is not going to work since it is only applicable to problems in which the matrix is symmetric and positive definite. We would have to resort to other iterative solvers instead, such as MinRes, SymmLQ, or GMRES, that can deal with indefinite systems. However, then the next problem immediately surfaces: Due to the zero block, there are zeros on the diagonal and none of the usual, "simple" preconditioners (Jacobi, SSOR) will work as they require division by diagonal elements.

      For the matrix sizes we expect to run with this program, the by far simplest approach would be to just use a direct solver (in particular, the SparseDirectUMFPACK class that is bundled with deal.II). step-29 goes this route and shows that solving any linear system can be done in just 3 or 4 lines of code.
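      For reference, that route looks like this (a sketch; system_matrix, solution, and system_rhs are assumed names):

      SparseDirectUMFPACK direct_solver;
      direct_solver.initialize(system_matrix);   // factorize the matrix once
      direct_solver.vmult(solution, system_rhs); // apply the inverse to the right hand side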

To use an iterative solver nonetheless, we can exploit the block structure of the linear system. Sorting all velocity degrees of freedom before all pressure degrees of freedom, the system reads

\begin{eqnarray*}
  \left(\begin{array}{cc} M & B \\ B^T & 0 \end{array}\right)
  \left(\begin{array}{c} U \\ P \end{array}\right)
  =
  \left(\begin{array}{c} F \\ G \end{array}\right),
\end{eqnarray*}

where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B^T$ corresponds to the negative divergence operator, and $B$ is its transpose and corresponds to the gradient.

      By block elimination, we can then re-order this system in the following way (multiply the first row of the system by $B^TM^{-1}$ and then subtract the second row from it):

      \begin{eqnarray*}
   B^TM^{-1}B P &=& B^TM^{-1} F - G, \\
   MU &=& F - BP.
 \end{eqnarray*}

Here, the matrix $S=B^TM^{-1}B$ (called the Schur complement of $A$) is obviously symmetric and, owing to the positive definiteness of $M$ and the fact that $B$ has full column rank, $S$ is also positive definite.

Consequently, if we could compute $S$, we could apply the Conjugate Gradient method to it. However, computing $S$ is expensive because it requires us to compute the inverse of the (possibly large) matrix $M$; and $S$ is in fact also a full matrix because even though $M$ is sparse, its inverse $M^{-1}$ will generally be a dense matrix. On the other hand, the CG algorithm doesn't require us to actually have a representation of $S$: It is sufficient to form matrix-vector products with it. We can do so in steps, using the fact that matrix products are associative (i.e., we can set parentheses in such a way that the product is more convenient to compute): To compute $Sv=(B^TM^{-1}B)v=B^T(M^{-1}(Bv))$, we

  1. compute $w = B v$;
  2. solve $My = w$ for $y=M^{-1}w$, using the CG method applied to the positive definite and symmetric mass matrix $M$;
  3. compute $z=B^Ty$ to obtain $z=Sv$.

        Note how we evaluate the expression $B^TM^{-1}Bv$ right to left to avoid matrix-matrix products; this way, all we have to do is evaluate matrix-vector products.

In the following, we will then have to come up with ways to represent the matrix $S$ so that it can be used in a Conjugate Gradient solver, as well as to define ways in which we can precondition the solution of the linear system involving $S$, and deal with solving linear systems with the matrix $M$ (the second step above).

        Note
        The key point in this consideration is to recognize that to implement an iterative solver such as CG or GMRES, we never actually need the actual elements of a matrix! All that is required is that we can form matrix-vector products. The same is true for preconditioners. In deal.II we encode this requirement by only requiring that matrices and preconditioners given to solver classes have a vmult() member function that does the matrix-vector product. How a class chooses to implement this function is not important to the solver. Consequently, classes can implement it by, for example, doing a sequence of products and linear solves as discussed above.

        The LinearOperator framework in deal.II

        deal.II includes support for describing such linear operations in a very general way. This is done with the LinearOperator class that, like the MatrixType concept, defines a minimal interface for applying a linear operation to a vector:

        std::function<void(Range &, const Domain &)> vmult;

        Rather than using a SolverControl we use the ReductionControl class here that stops iterations when either an absolute tolerance is reached (for which we choose $10^{-18}$) or when the residual is reduced by a certain factor (here, $10^{-10}$). In contrast the SolverControl class only checks for absolute tolerances. We have to use ReductionControl in our case to work around a minor issue: The right hand sides that we will feed to op_M_inv are essentially formed by residuals that naturally decrease vastly in norm as the outer iterations progress. This makes control by an absolute tolerance very error prone.
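
Putting the pieces together, the construction of op_M_inv can be sketched as follows; this is consistent with the tolerances just mentioned, though not necessarily the program's verbatim lines:

ReductionControl reduction_control_M(2000, 1.0e-18, 1.0e-10);
SolverCG<Vector<double>> solver_M(reduction_control_M);
PreconditionJacobi<SparseMatrix<double>> preconditioner_M;
preconditioner_M.initialize(M);

const auto op_M = linear_operator(M);
const auto op_M_inv = inverse_operator(op_M, solver_M, preconditioner_M);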

We now have a LinearOperator op_M_inv that we can use to construct more complicated operators such as the Schur complement $S$. Assuming that B is a reference to the upper right block, constructing a LinearOperator op_S is a matter of two lines:

const auto op_B = linear_operator(B);
        const auto op_S = transpose_operator(op_B) * op_M_inv * op_B;

Here, the multiplication of three LinearOperator objects yields a composite object op_S whose vmult() function first applies $B$, then $M^{-1}$ (i.e. solving an equation with $M$), and finally $B^T$ to any given input vector. In that sense op_S.vmult() is similar to the following code:

        B.vmult (tmp1, src); // multiply with the top right block: B
        solver_M(M, tmp2, tmp1, preconditioner_M); // multiply with M^-1
        B.Tvmult (dst, tmp2); // multiply with the bottom left block: B^T

(tmp1 and tmp2 are two temporary vectors). The key point behind this approach is the fact that we never actually create a matrix-matrix product. Instead, whenever we have to perform a matrix-vector multiplication with op_S we simply run all individual vmult operations in the above sequence.

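For comparison, the manual approach referred to next would be a small class whose vmult() function performs the three steps by hand. The program's own version of that class is elided in this excerpt; the following sketch (with assumed names and solver settings) illustrates the idea:

class SchurComplement
{
public:
  SchurComplement(const SparseMatrix<double> &M, const SparseMatrix<double> &B)
    : M(M), B(B)
  {}

  void vmult(Vector<double> &dst, const Vector<double> &src) const
  {
    Vector<double> tmp1(M.m()), tmp2(M.m());
    B.vmult(tmp1, src); // tmp1 = B src
    // solve M tmp2 = tmp1 using CG with a Jacobi preconditioner:
    SolverControl control(2000, 1.e-12);
    SolverCG<Vector<double>> solver(control);
    PreconditionJacobi<SparseMatrix<double>> preconditioner;
    preconditioner.initialize(M);
    solver.solve(M, tmp2, tmp1, preconditioner);
    B.Tvmult(dst, tmp2); // dst = B^T tmp2
  }

private:
  const SparseMatrix<double> &M, &B;
};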

Even though both approaches are exactly equivalent, the LinearOperator class has a big advantage over this manual approach. It provides so-called syntactic sugar: Mathematically, we think about $S$ as being the composite matrix $S=B^TM^{-1}B$ and the LinearOperator class allows you to write this out more or less verbatim,

const auto op_M_inv = inverse_operator(op_M, solver_M, preconditioner_M);
        const auto op_S = transpose_operator(op_B) * op_M_inv * op_B;
        The manual approach on the other hand obscures this fact.

All that is left for us to do now is to form the right hand sides of the two equations defining $P$ and $U$, and then solve them with the Schur complement matrix and the mass matrix, respectively. For example, the right hand side of the first equation reads $B^TM^{-1}F-G$. This could be implemented as follows:

      Vector<double> schur_rhs (P.size());
      Vector<double> tmp (U.size());
      op_M_inv.vmult (tmp, F);
      transpose_operator(op_B).vmult (schur_rhs, tmp);
schur_rhs -= G;

Writing these temporary vectors out by hand is cumbersome. As an alternative, deal.II provides the PackagedOperation class, whose interface contains, among other things,

std::function<void(Range &)> apply_add;

      The class allows lazy evaluation of expressions involving vectors and linear operators. This is done by storing the computational expression and only performing the computation when either the object is converted to a vector object, or PackagedOperation::apply() (or PackagedOperation::apply_add()) is invoked by hand. Assuming that F and G are the two vectors of the right hand side we can simply write:

      const auto schur_rhs = transpose_operator(op_B) * op_M_inv * F - G;

      Here, schur_rhs is a PackagedOperation that records the computation we specified. It does not create a vector with the actual result immediately.


With these prerequisites at hand, solving for $P$ and $U$ is a matter of creating another solver and inverse:

      SolverControl solver_control_S(2000, 1.e-12);
      SolverCG<Vector<double>> solver_S(solver_control_S);
      PreconditionIdentity preconditioner_S;
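
A sketch of how the two solves then proceed, assuming the objects constructed above (the corresponding lines of the program are elided in this excerpt):

const auto op_S_inv = inverse_operator(op_S, solver_S, preconditioner_S);
P = op_S_inv * schur_rhs;      // solve the Schur complement equation for P
U = op_M_inv * (F - op_B * P); // then recover U from M U = F - B P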
      Note
      The functionality that we developed in this example step by hand is already readily available in the library. Have a look at schur_complement(), condense_schur_rhs(), and postprocess_schur_solution().

      A preconditioner for the Schur complement

One may ask whether it would help if we had a preconditioner for the Schur complement $S=B^TM^{-1}B$. The general answer, as usual, is: of course. The problem is only that we don't know anything about this Schur complement matrix. We do not know its entries, all we know is its action. On the other hand, we have to realize that our solver is expensive since in each iteration we have to do one matrix-vector product with the Schur complement, which means that we have to invert the mass matrix once in each iteration.


There are different approaches to preconditioning such a matrix. On the one extreme is to use something that is cheap to apply and therefore has no real impact on the work done in each iteration. The other extreme is a preconditioner that is itself very expensive, but in return really brings down the number of iterations required to solve with $S$.

      We will try something along the second approach, as much to improve the performance of the program as to demonstrate some techniques. To this end, let us recall that the ideal preconditioner is, of course, $S^{-1}$, but that is unattainable. However, how about

      \begin{eqnarray*}
   \tilde S^{-1} = [B^T ({\textrm{diag}\ }M)^{-1}B]^{-1}
 \end{eqnarray*}


as a preconditioner? That would mean that every time we have to do one preconditioning step, we actually have to solve with $\tilde S$. At first, this looks almost as expensive as solving with $S$ right away. However, note that in the inner iteration, we do not have to calculate $M^{-1}$, but only the inverse of its diagonal, which is cheap.

Thankfully, the LinearOperator framework makes this very easy to write out. We already used a Jacobi preconditioner (preconditioner_M) for the $M$ matrix earlier. So all that is left to do is to write out what the approximate Schur complement should look like:

      const auto op_aS =
      transpose_operator(op_B) * linear_operator(preconditioner_M) * op_B;

Note how this operator differs in simply doing one Jacobi sweep (i.e. multiplying with the inverses of the diagonal) instead of multiplying with the full $M^{-1}$. (This is how a single Jacobi preconditioner step with $M$ is defined: it is the multiplication with the inverse of the diagonal of $M$; in other words, the operation $({\textrm{diag}\ }M)^{-1}x$ on a vector $x$ is exactly what PreconditionJacobi does.)

With all this we almost have the preconditioner completed: it should be the inverse of the approximate Schur complement. We implement this again by creating a linear operator with the inverse_operator() function. This time, however, we would like to choose a relatively modest tolerance for the CG solver (that inverts op_aS). The reasoning is that op_aS is only a coarse approximation to op_S, so we actually do not need to invert it exactly. This, however, creates a subtle problem: preconditioner_S will be used in the final outer CG iteration to create an orthogonal basis. But for this to work, it must be precisely the same linear operation for every invocation. We ensure this by using an IterationNumberControl that allows us to fix the number of CG iterations that are performed to a fixed small number (in our case 30):

      IterationNumberControl iteration_number_control_aS(30, 1.e-18);
      SolverCG<Vector<double>> solver_aS(iteration_number_control_aS);
      PreconditionIdentity preconditioner_aS;
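
The final step, elided in this excerpt, is then presumably to wrap everything into the preconditioner via inverse_operator(), in analogy to op_M_inv above:

const auto preconditioner_S =
  inverse_operator(op_aS, solver_aS, preconditioner_aS);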

      The next thing is that we want to figure out the sizes of these blocks so that we can allocate an appropriate amount of space. To this end, we call the DoFTools::count_dofs_per_fe_component() function that counts how many shape functions are non-zero for a particular vector component. We have dim+1 vector components, and DoFTools::count_dofs_per_fe_component() will count how many shape functions belong to each of these components.


There is one problem here. As described in the documentation of that function, it wants to put the number of $x$-velocity shape functions into dofs_per_component[0], the number of $y$-velocity shape functions into dofs_per_component[1] (and similar in 3d), and the number of pressure shape functions into dofs_per_component[dim]. But, the Raviart-Thomas element is special in that it is non-primitive, i.e., for Raviart-Thomas elements all velocity shape functions are nonzero in all components. In other words, the function cannot distinguish between $x$ and $y$ velocity functions because there is no such distinction. It therefore puts the overall number of velocity shape functions into each of dofs_per_component[c], $0\le c<\text{dim}$. On the other hand, the number of pressure variables equals the number of shape functions that are nonzero in the dim-th component.

      Using this knowledge, we can get the number of velocity shape functions from any of the first dim elements of dofs_per_component, and then use this below to initialize the vector and matrix block sizes, as well as create output.

      Note
      If you find this concept difficult to understand, you may want to consider using the function DoFTools::count_dofs_per_fe_block() instead, as we do in the corresponding piece of code in step-22. You might also want to read up on the difference between blocks and components in the glossary.
        const std::vector<types::global_dof_index> dofs_per_component =
    DoFTools::count_dofs_per_fe_component(dof_handler);
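
As a short illustration of the statement above, the two block sizes can then be read off as follows (n_u and n_p are assumed names):

  const unsigned int n_u = dofs_per_component[0],   // total number of velocity shape functions
                     n_p = dofs_per_component[dim]; // number of pressure shape functions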

      Results

      Output of the program and graphical visualization


If we run the program as is, we get this output for the $32\times 32$ mesh we use (for a total of 1024 cells with 1024 pressure degrees of freedom since we use piecewise constants, and 2112 velocities because the Raviart-Thomas element defines one degree of freedom per face and there are $1024 + 32 = 1056$ faces parallel to the $x$-axis and the same number parallel to the $y$-axis):

      \$ make run
       [ 66%] Built target step-20
       Scanning dependencies of target run
       [100%] Run step-20 with Release configuration
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html differs (HTML document, UTF-8 Unicode text, with very long lines)
      --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html	2024-01-30 03:04:51.344872185 +0000
      +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html	2024-01-30 03:04:51.344872185 +0000
       

      The equations covered here are an extension of the material already covered in step-20. In particular, they fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems module.

      The two phase flow problem

      Modeling of two phase flow in porous media is important for both environmental remediation and the management of petroleum and groundwater reservoirs. Practical situations involving two phase flow include the dispersal of a nonaqueous phase liquid in an aquifer, or the joint movement of a mixture of fluids such as oil and water in a reservoir. Simulation models, if they are to provide realistic predictions, must accurately account for these effects.


To derive the governing equations, consider two phase flow in a reservoir $\Omega$ under the assumption that the movement of fluids is dominated by viscous effects; i.e. we neglect the effects of gravity, compressibility, and capillary pressure. Porosity will be considered to be constant. We will denote variables referring to either of the two phases using subscripts $w$ and $o$, short for water and oil. The derivation of the equations holds for other pairs of fluids as well, however.

      The velocity with which molecules of each of the two phases move is determined by Darcy's law that states that the velocity is proportional to the pressure gradient:

\begin{eqnarray*}
   \mathbf{u}_{j}
   =
   -\frac{k_{rj}(S)}{\mu_{j}} \mathbf{K} \cdot \nabla p
 \end{eqnarray*}


where $\mathbf{u}_{j}$ is the velocity of phase $j=o,w$, $K$ is the permeability tensor, $k_{rj}$ is the relative permeability of phase $j$, $p$ is the pressure and $\mu_{j}$ is the viscosity of phase $j$. Finally, $S$ is the saturation (volume fraction), i.e. a function with values between 0 and 1 indicating the composition of the mixture of fluids. In general, the coefficients $K, k_{rj}, \mu$ may be spatially dependent variables, and we will always treat them as non-constant functions in the following.

      We combine Darcy's law with the statement of conservation of mass for each phase,

      \[
   \textrm{div}\ \mathbf{u}_{j} = q_j,
 \]

Summing over the two phases then yields the pressure equation

\begin{eqnarray*}
 - \nabla \cdot (\mathbf{K}\lambda(S) \nabla p)= q.
 \end{eqnarray*}


Here, $q$ is the sum source term, and

      \[
   \lambda(S) = \frac{k_{rw}(S)}{\mu_{w}}+\frac{k_{ro}(S)}{\mu_{o}}
 \]

Note that the advection equation contains the term $\mathbf{u} \cdot \nabla F(S)$ rather than $\mathbf{u} \cdot \nabla S$ to indicate that the saturation is not simply transported along; rather, since the two phases move with different velocities, the saturation can actually change even in the advected coordinate system. To see this, rewrite $\mathbf{u} \cdot \nabla F(S) = \mathbf{u} F'(S) \cdot \nabla S$ to observe that the actual velocity with which the phase with saturation $S$ is transported is $\mathbf u F'(S)$ whereas the other phase is transported at velocity $\mathbf u (1-F'(S))$. $F(S)$ is consequently often referred to as the fractional flow.

      In summary, what we get are the following two equations:

      \begin{eqnarray*}
   - \nabla \cdot (\mathbf{K}\lambda(S) \nabla p) &=& q
   \qquad \textrm{in}\ \Omega\times[0,T],
 \\
   S_{t} + \mathbf{u} \cdot \nabla F(S) = 0
   \qquad \textrm{in}\ \Omega\times[0,T].
 \end{eqnarray*}


Here, $p=p(\mathbf x, t), S=S(\mathbf x, t)$ are now time dependent functions: while at every time instant the flow field is in equilibrium with the pressure (i.e. we neglect dynamic accelerations), the saturation is transported along with the flow and therefore changes over time, in turn affecting the flow field again through the dependence of the first equation on $S$.

      This set of equations has a peculiar character: one of the two equations has a time derivative, the other one doesn't. This corresponds to the character that the pressure and velocities are coupled through an instantaneous constraint, whereas the saturation evolves over finite time scales.

Such systems of equations are called Differential Algebraic Equations (DAEs), since one of the equations is a differential equation, the other is not (at least not with respect to the time variable) and is therefore an "algebraic" equation. (The notation comes from the field of ordinary differential equations, where everything that does not have derivatives with respect to the time variable is necessarily an algebraic equation.) This class of equations contains pretty well-known cases: for example, the time dependent Stokes and Navier-Stokes equations (where the algebraic constraint is that the divergence of the flow field, $\textrm{div}\ \mathbf u$, must be zero) as well as the time dependent Maxwell equations (here, the algebraic constraint is that the divergence of the electric displacement field equals the charge density, $\textrm{div}\ \mathbf D = \rho$ and that the divergence of the magnetic flux density is zero: $\textrm{div}\ \mathbf B = 0$); even the quasistatic model of step-18 falls into this category. We will see that the different character of the two equations will inform our discretization strategy for the two equations.

In each time step, we apply the following time discretization:

\begin{eqnarray*}
   -\nabla \cdot (\mathbf{K}\lambda(S^n) \nabla p^{n+1}) &=& q^{n+1},
   \\
   \mathbf{u}^{n+1} &=& -\lambda(S^n) \mathbf{K} \nabla p^{n+1},
   \\
   \frac{S^{n+1}-S^n}{\triangle t} + \mathbf{u}^{n+1} \cdot \nabla F(S^n) &=& 0,
 \end{eqnarray*}

      where $\triangle t$ is the length of a time step. Note how we solve the implicit pressure-velocity system that only depends on the previously computed saturation $S^n$, and then do an explicit time step for $S^{n+1}$ that only depends on the previously known $S^n$ and the just computed $\mathbf{u}^{n+1}$. This way, we never have to iterate for the nonlinearities of the system as we would have if we used a fully implicit method. (In a more modern perspective, this should be seen as an "operator splitting" method. step-58 has a long description of the idea behind this.)


We can then state the problem in weak form as follows, by multiplying each equation with test functions $\mathbf v$, $\phi$, and $\sigma$ and integrating terms by parts:

      \begin{eqnarray*}
   \left((\mathbf{K}\lambda(S^n))^{-1} \mathbf{u}^{n+1},\mathbf v\right)_\Omega -
   (p^{n+1}, \nabla\cdot\mathbf v)_\Omega &=&
   -(p^{n+1}, \mathbf n \cdot \mathbf v)_{\partial\Omega},
   \\
   (\nabla \cdot\mathbf{u}^{n+1}, \phi)_\Omega &=& (q^{n+1},\phi)_\Omega
 \end{eqnarray*}


Note that in the first term, we have to prescribe the pressure $p^{n+1}$ on the boundary $\partial\Omega$ as boundary values for our problem. $\mathbf n$ denotes the unit outward normal vector to $\partial K$, as usual.

      For the saturation equation, we obtain after integrating by parts

      \begin{eqnarray*}
   (S^{n+1}, \sigma)_\Omega
   -
   \triangle t
   \sum_K
   \left\{
   \left(F(S^n), \nabla \cdot (\mathbf{u}^{n+1} \sigma)\right)_K
   -
   \left(F(S^n) (\mathbf n \cdot \mathbf{u}^{n+1}), \sigma\right)_{\partial K}
   \right\}
   =
   (S^n,\sigma)_\Omega +
   \triangle t \sum_K \left(F(S^n) q^{n+1}, \sigma\right)_K.
 \end{eqnarray*}

We introduce an object of type DiscreteTime in order to keep track of the current value of time and time step in the code. This class encapsulates many complexities regarding adjusting time step size and stopping at a specified final time.

      Space discretization


In each time step, we then apply the mixed finite element method of step-20 to the velocity and pressure. To be well-posed, we choose Raviart-Thomas spaces $RT_{k}$ for $\mathbf{u}$ and discontinuous elements of class $DGQ_{k}$ for $p$. For the saturation, we will also choose $DGQ_{k}$ spaces.

      Since we have discontinuous spaces, we have to think about how to evaluate terms on the interfaces between cells, since discontinuous functions are not really defined there. In particular, we have to give a meaning to the last term on the left hand side of the saturation equation. To this end, let us define that we want to evaluate it in the following sense:

      \begin{eqnarray*}
   &&\left(F(S^n) (\mathbf n \cdot \mathbf{u}^{n+1}), \sigma\right)_{\partial K}
   \\
   &&\qquad =
   \left(F(S^n_+) (\mathbf n \cdot \mathbf{u}^{n+1}_+), \sigma\right)_{\partial K_+}
   +
   \left(F(S^n_-) (\mathbf n \cdot \mathbf{u}^{n+1}_-), \sigma\right)_{\partial K_-},
 \end{eqnarray*}

where $\partial K_{-} \dealcoloneq \{x\in \partial K, \mathbf{u}(x) \cdot \mathbf{n}<0\}$ denotes the inflow boundary and $\partial K_{+} \dealcoloneq \{\partial K \setminus \partial K_{-}\}$ is the outflow part of the boundary. The quantities $S_+,\mathbf{u}_+$ then correspond to the values of these variables on the present cell, whereas $S_-,\mathbf{u}_-$ (needed on the inflow part of the boundary of $K$) are quantities taken from the neighboring cell. Some more context on discontinuous element techniques and evaluation of fluxes can also be found in step-12 and step-12b.

      Linear solvers


The linear solvers used in this program are a straightforward extension of the ones used in step-20 (but without LinearOperator). Essentially, we simply have to extend everything from two to three solution components. If we use the discrete spaces mentioned above and put shape functions into the bilinear forms, we arrive at the following linear system to be solved for time step $n+1$:

      \[
 \left(
 \begin{array}{ccc}
 M^u(S^n) & B^T & 0 \\
 B & 0 & 0 \\
 \triangle t\, H & 0 & M^S
 \end{array}
 \right)
 \left(
 \begin{array}{c}
 \mathbf{U}^{n+1} \\ P^{n+1} \\ S^{n+1}
 \end{array}
 \right)
 =
 \left(
 \begin{array}{c}
 F_1 \\ F_2 \\ F_3
 \end{array}
 \right)
 \]


where the individual matrices and vectors are defined as follows using shape functions $\mathbf v_i$ (of type Raviart Thomas $RT_k$) for velocities and $\phi_i$ (of type $DGQ_k$) for both pressures and saturations:

      \begin{eqnarray*}
 M^u(S^n)_{ij} &=&
 \left((\mathbf{K}\lambda(S^n))^{-1} \mathbf{v}_i, \mathbf{v}_j\right)_\Omega,
 \\
 &\vdots&
 \\
 (F_3)_i &=&
 (S^n,\phi_i)_\Omega +\triangle t \sum_K  \left(F(S^n) q^{n+1}, \phi_i\right)_K.
 \end{eqnarray*}

Note
Due to historical accidents, the roles of the matrices $B$ and $B^T$ are reversed in this program compared to step-20. In other words, here $B$ refers to the divergence operator and $B^T$ to the gradient operator, while it was the other way around in step-20.

      The system above presents a complication: Since the matrix $H_{ij}$ depends on $\mathbf u^{n+1}$ implicitly (the velocities are needed to determine which parts of the boundaries $\partial K$ of cells are influx or outflux parts), we can only assemble this matrix after we have solved for the velocities.

      The solution scheme then involves the following steps:

1. Solve for the pressure $p^{n+1}$ and the velocity $\mathbf u^{n+1}$ from the first two equations, which do not involve the saturation.
2. Assemble the right hand side of the saturation equation using the just computed velocities.
3. Solve for the saturation $S^{n+1}$.

In this scheme, we never actually build the matrix $H$, but rather generate the right hand side of the third equation once we are ready to do so.

      In the program, we use a variable solution to store the solution of the present time step. At the end of each step, we copy its content, i.e. all three of its block components, into the variable old_solution for use in the next time step.
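
In code, that end-of-step copy is a single block-vector assignment; a trivial sketch, assuming both variables are BlockVector objects:

old_solution = solution; // copy all three block components for the next step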

      Choosing a time step

      A general rule of thumb in hyperbolic transport equations like the equation we have to solve for the saturation equation is that if we use an explicit time stepping scheme, then we should use a time step such that the distance that a particle can travel within one time step is no larger than the diameter of a single cell. In other words, here, we should choose

\[
   \triangle t_{n+1} \le \frac h{|\mathbf{u}^{n+1}(\mathbf{x})|}
 \]
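
In code, this rule could be realized along the following lines; a sketch that assumes a helper function get_maximal_velocity() of the kind the program uses, together with the DiscreteTime interface mentioned above:

time.set_desired_next_step_size(GridTools::minimal_cell_diameter(triangulation) /
                                get_maximal_velocity());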

For simplicity, this program assumes that there is no source, $q=0$, and that the heterogeneous porous medium is isotropic $\mathbf{K}(\mathbf{x}) = k(\mathbf{x}) \mathbf{I}$. The first one of these is a realistic assumption in oil reservoirs: apart from injection and production wells, there are usually no mechanisms for fluids to appear or disappear out of the blue. The second one is harder to justify: on a microscopic level, most rocks are isotropic, because they consist of a network of interconnected pores. However, this microscopic scale is out of the range of today's computer simulations, and we have to be content with simulating things on the scale of meters. On that scale, however, fluid transport typically happens through a network of cracks in the rock, rather than through pores. However, cracks often result from external stress fields in the rock layer (for example from tectonic faulting) and the cracks are therefore roughly aligned. This leads to a situation where the permeability is often orders of magnitude larger in the direction parallel to the cracks than perpendicular to the cracks. A problem one typically faces in reservoir simulation, however, is that the modeler doesn't know the direction of cracks because oil reservoirs are not accessible to easy inspection. The only solution in that case is to assume an effective, isotropic permeability.

      Whatever the matter, both of these restrictions, no sources and isotropy, would be easy to lift with a few lines of code in the program.


Next, for simplicity, our numerical simulation will be done on the unit cell $\Omega = [0,1]\times [0,1]$ for $t\in [0,T]$. Our initial conditions are $S(\mathbf{x},0)=0$; in the oil reservoir picture, where $S$ would indicate the water saturation, this means that the reservoir contains pure oil at the beginning. Note that we do not need any initial conditions for pressure or velocity, since the equations do not contain time derivatives of these variables. Finally, we impose the following pressure boundary conditions:

      \[
   p(\mathbf{x},t)=1-x_1 \qquad \textrm{on}\ \partial\Omega.
 \]

Note
Coming back to this testcase in step-43 several years later revealed an oddity in the setup of this testcase. To this end, consider that we can rewrite the advection equation for the saturation as $S_{t} + (\mathbf{u} F'(S)) \cdot \nabla S = 0$. Now, at the initial time, we have $S=0$, and with the given choice of function $F(S)$, we happen to have $F'(0)=0$. In other words, at $t=0$, the equation reduces to $S_t=0$ for all $\mathbf x$, so the saturation is zero everywhere and it is going to stay zero everywhere! This is despite the fact that $\mathbf u$ is not necessarily zero: the combined fluid is moving, but we've chosen our partial flux $F(S)$ in such a way that infinitesimal amounts of wetting fluid also only move at infinitesimal speeds (i.e., they stick to the medium more than the non-wetting phase in which they are embedded). That said, how can we square this with the knowledge that wetting fluid is invading from the left, leading to the flow patterns seen in the results section? That's where we get into mathematics: Equations like the transport equation we are considering here have infinitely many solutions, but only one of them is physical: the one that results from the so-called viscosity limit, called the viscosity solution. The thing is that with discontinuous elements we arrive at this viscosity limit because using a numerical flux introduces a finite amount of artificial viscosity into the numerical scheme. On the other hand, in step-43, we use an artificial viscosity that is proportional to $\|\mathbf u F'(S)\|$ on every cell, which at the initial time is zero. Thus, the saturation there is zero and remains zero; the solution we then get is one solution of the advection equation, but the method does not converge to the viscosity solution without further changes. We will therefore use a different initial condition in that program.

      Finally, to come back to the description of the testcase, we will show results for computations with the two permeability functions introduced at the end of the results section of step-20:

• A function that models a single, winding crack that snakes through the domain. In analogy to step-20, but taking care of the slightly different geometry we have here, we describe this by a function of the same form as the one used there.

• A function that models a random medium built from exponentially decaying bumps of the form $e^{-\left(\frac{|\mathbf{x}-\mathbf{x}_i|}{0.05}\right)^2}$, where the centers $\mathbf{x}_i$ are $N$ randomly chosen locations inside the domain. This function models a domain in which there are $N$ centers of higher permeability (for example where rock has cracked) embedded in a matrix of more pristine, unperturbed background rock. Note that here we have cut off the permeability function both above and below to ensure a bounded condition number.

      The commented program

This program is an adaptation of step-20 and includes some techniques of DG methods from step-12. A good part of the program is therefore very similar to step-20 and we will not comment again on these parts. Only the new stuff will be discussed in more detail.

  5. project_back_saturation resets all saturation degrees of freedom with values less than zero to zero, and all those with saturations greater than one to one.

The rest of the class should be pretty much obvious. The viscosity variable stores the viscosity $\mu$ that enters several of the formulas in the nonlinear equations. The variable time keeps track of the time information within the simulation.

        template <int dim>
        class TwoPhaseFlowProblem
        {
    // ... (member declarations elided in this excerpt)
  };

      TwoPhaseFlowProblem class implementation

Here now the implementation of the main class. Much of it is actually copied from step-20, so we won't comment on it in much detail. You should try to get familiar with that program first; then most of what is happening here should be clear.

      TwoPhaseFlowProblem::TwoPhaseFlowProblem


First for the constructor. We use $RT_k \times DGQ_k \times DGQ_k$ spaces. For initializing the DiscreteTime object, we don't set the time step size in the constructor because we don't have its value yet. The time step size is initially set to zero, but it will be computed before it is needed to increment time, as described in a subsection of the introduction. The time object internally prevents itself from being incremented when $dt = 0$, forcing us to set a non-zero desired size for $dt$ before advancing time.

        template <int dim>
        TwoPhaseFlowProblem<dim>::TwoPhaseFlowProblem(const unsigned int degree)
        : degree(degree)
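  // A hedged sketch of how the elided initializer list presumably continues,
  // given the text above (the exact combinations and values are assumptions):
  //   , fe(FE_RaviartThomas<dim>(degree), 1,
  //        FE_DGQ<dim>(degree), 1,
  //        FE_DGQ<dim>(degree), 1)
  //   , dof_handler(triangulation)
  //   , time(/*start=*/0., /*end=*/1.) // step size deliberately not set here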
        fe_values.get_function_values(solution, present_solution_values);
       

First for the cell terms. These are, following the formulas in the introduction, $(S^n,\sigma)-(F(S^n) \mathbf{v}^{n+1},\nabla \sigma)$, where $\sigma$ is the saturation component of the test function:

        for (unsigned int q = 0; q < n_q_points; ++q)
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
        {
    // ... (the computation of the local contributions is elided in this excerpt)
  }

      Possibilities for extensions

      There are a number of areas where this program can be improved. Three of them are listed below. All of them are, in fact, addressed in a tutorial program that forms the continuation of the current one: step-43.

      Solvers


At present, the program is not particularly fast: the 2d random medium computation took about a day for the 1,000 or so time steps. The corresponding 3d computation took almost two days for 800 time steps. The reason why it isn't faster than this is twofold. First, we rebuild the entire matrix in every time step, although some parts such as the $B$, $B^T$, and $M^S$ blocks never change.

      Second, we could do a lot better with the solver and preconditioners. Presently, we solve the Schur complement $B^TM^u(S)^{-1}B$ with a CG method, using $[B^T (\textrm{diag}(M^u(S)))^{-1} B]^{-1}$ as a preconditioner. Applying this preconditioner is expensive, since it involves solving a linear system each time. This may have been appropriate for step-20, where we have to solve the entire problem only once. However, here we have to solve it hundreds of times, and in such cases it is worth considering a preconditioner that is more expensive to set up the first time, but cheaper to apply later on.


One possibility would be to realize that the matrix we use as preconditioner, $B^T (\textrm{diag}(M^u(S)))^{-1} B$, is still sparse, and symmetric on top of that. If one looks at how the flow field evolves over time, we also see that while $S$ changes significantly over time, the pressure hardly does and consequently $B^T (\textrm{diag}(M^u(S)))^{-1} B \approx B^T (\textrm{diag}(M^u(S^0)))^{-1} B$. In other words, the matrix for the first time step should be a good preconditioner also for all later time steps. With a bit of back-and-forthing, it isn't hard to actually get a representation of it as a SparseMatrix object. We could then hand it off to the SparseMIC class to form a sparse incomplete Cholesky decomposition. To form this decomposition is expensive, but we have to do it only once in the first time step, and can then use it as a cheap preconditioner in the future. We could do better even by using the SparseDirectUMFPACK class that produces not only an incomplete, but a complete decomposition of the matrix, which should yield an even better preconditioner.
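
A sketch of this strategy in code, under the assumption that the approximate Schur complement of the first time step has been assembled into a SparseMatrix object named aS_matrix (a hypothetical name), and that schur_complement is the matrix-free Schur complement object:

SparseDirectUMFPACK aS_decomposition;
aS_decomposition.initialize(aS_matrix); // expensive, but done only once
// in every later time step, reuse the decomposition as the preconditioner
// for the Schur complement CG solve:
solver_S.solve(schur_complement, P, schur_rhs, aS_decomposition);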

Finally, why use the approximation $B^T (\textrm{diag}(M^u(S)))^{-1} B$ to precondition $B^T M^u(S)^{-1} B$? The latter matrix, after all, is the mixed form of the Laplace operator on the pressure space, for which we use linear elements. We could therefore build a separate matrix $A^p$ on the side that directly corresponds to the non-mixed formulation of the Laplacian, for example using the bilinear form $(\mathbf{K}\lambda(S^n) \nabla \varphi_i,\nabla\varphi_j)$. We could then form an incomplete or complete decomposition of this non-mixed matrix and use it as a preconditioner of the mixed form.

\[
   \triangle t_{n+1} \le \frac h{|\mathbf{u}^{n+1}(\mathbf{x})|}
 \]


that has to hold globally, i.e. for all $\mathbf x$. After discretization, we satisfy it by choosing

      \[
   \triangle t_{n+1} = \frac {\min_K h_K}{\max_{\mathbf{x}}|\mathbf{u}^{n+1}(\mathbf{x})|}.
 \]

/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html	2024-01-30 03:04:51.428872884 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html	2024-01-30 03:04:51.428872884 +0000

This material is based upon work partly supported by the National Science Foundation under Award No. EAR-0426271 and The California Institute of Technology. Any opinions, findings, and conclusions or recommendations expressed in this publication are those of the author and do not necessarily reflect the views of the National Science Foundation or of The California Institute of Technology.

      Introduction

      This program deals with the Stokes system of equations which reads as follows in non-dimensionalized form:

\begin{eqnarray*}
   -2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p &=& \textbf{f},
   \\
   -\textrm{div}\; \textbf{u} &=& 0,
\end{eqnarray*}


where $\textbf u$ denotes the velocity of a fluid, $p$ is its pressure, $\textbf f$ are external forces, and $\varepsilon(\textbf{u})= \nabla^s{\textbf{u}}= \frac 12 \left[(\nabla \textbf{u}) + (\nabla \textbf{u})^T\right]$ is the rank-2 tensor of symmetrized gradients; a component-wise definition of it is $\varepsilon(\textbf{u})_{ij}=\frac 12\left(\frac{\partial u_i}{\partial x_j} + \frac{\partial u_j}{\partial x_i}\right)$.

      The Stokes equations describe the steady-state motion of a slow-moving, viscous fluid such as honey, rocks in the earth mantle, or other cases where inertia does not play a significant role. If a fluid is moving fast enough that inertia forces are significant compared to viscous friction, the Stokes equations are no longer valid; taking into account inertia effects then leads to the nonlinear Navier-Stokes equations. However, in this tutorial program, we will focus on the simpler Stokes system.

      Note that when deriving the more general compressible Navier-Stokes equations, the diffusion is modeled as the divergence of the stress tensor

\begin{eqnarray*}
   \tau = - \mu \left(2\varepsilon(\textbf{u}) - \frac{2}{3}\nabla \cdot \textbf{u} I\right),
\end{eqnarray*}


where $\mu$ is the viscosity of the fluid. With the assumption of $\mu=1$ (assume constant viscosity and non-dimensionalize the equation by dividing out $\mu$) and assuming incompressibility ( $\textrm{div}\; \textbf{u}=0$), we arrive at the formulation from above:

\begin{eqnarray*}
   \textrm{div}\; \tau = -2\textrm{div}\;\varepsilon(\textbf{u}).
\end{eqnarray*}


A different formulation uses the Laplace operator ( $-\triangle \textbf{u}$) instead of the symmetrized gradient. A big difference here is that the different components of the velocity do not couple. If you assume additional regularity of the solution $\textbf{u}$ (second partial derivatives exist and are continuous), the formulations are equivalent:

\begin{eqnarray*}
   \textrm{div}\; \tau
   = -2\textrm{div}\;\varepsilon(\textbf{u})
   = -\triangle \textbf{u} - \nabla \cdot (\nabla\textbf{u})^T
   = -\triangle \textbf{u}.
\end{eqnarray*}


This is because the $i$th entry of $\nabla \cdot (\nabla\textbf{u})^T$ is given by:

\begin{eqnarray*}
[\nabla \cdot (\nabla\textbf{u})^T]_i
= \sum_j \frac{\partial}{\partial x_j} [(\nabla\textbf{u})^T]_{i,j}
= \sum_j \frac{\partial}{\partial x_j} [(\nabla\textbf{u})]_{j,i}
= \sum_j \frac{\partial}{\partial x_j} \frac{\partial u_j}{\partial x_i}
= \frac{\partial}{\partial x_i}
  \underbrace{\textrm{div}\; \textbf{u}}_{=0}
= 0.
\end{eqnarray*}

      If you can not assume the above mentioned regularity, or if your viscosity is not a constant, the equivalence no longer holds. Therefore, we decided to stick with the more physically accurate symmetric tensor formulation in this tutorial.

      To be well-posed, we will have to add boundary conditions to the equations. What boundary conditions are readily possible here will become clear once we discuss the weak form of the equations.

      The equations covered here fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems module.

      Weak form

      The weak form of the equations is obtained by writing it in vector form as

\begin{eqnarray*}
   \begin{pmatrix}
     {-2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p}
     \\
     {-\textrm{div}\; \textbf{u}}
   \end{pmatrix}
   =
   \begin{pmatrix}
   \textbf{f}
   \\
   0
   \end{pmatrix},
\end{eqnarray*}


forming the dot product from the left with a vector-valued test function $\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$ and integrating over the domain $\Omega$, yielding the following set of equations:

\begin{eqnarray*}
   (\mathrm v,
    -2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p)_{\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
\end{eqnarray*}


which has to hold for all test functions $\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$.

      A generally good rule of thumb is that if one can reduce how many derivatives are taken on any variable in the formulation, then one should in fact do that using integration by parts. (This is motivated by the theory of partial differential equations, and in particular the difference between strong and weak solutions.) We have already done that for the Laplace equation, where we have integrated the second derivative by parts to obtain the weak formulation that has only one derivative on both test and trial function.

      In the current context, we integrate by parts the second term:

\begin{eqnarray*}
  (\textbf{v}, -2\; \textrm{div}\; \varepsilon(\textbf{u}))_{\Omega}
  - (\textrm{div}\; \textbf{v}, p)_{\Omega}
  + (\textbf{n}\cdot\textbf{v}, p)_{\partial\Omega}
  -
  (q,\textrm{div}\; \textbf{u})_{\Omega}
  =
  (\textbf{v}, \textbf{f})_\Omega.
\end{eqnarray*}

      Likewise, we integrate by parts the first term to obtain

\begin{eqnarray*}
  (\nabla \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Omega}
  -
  (\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\partial\Omega}
  - (\textrm{div}\; \textbf{v}, p)_{\Omega}
  + (\textbf{n}\cdot\textbf{v}, p)_{\partial\Omega}
  -
  (q,\textrm{div}\; \textbf{u})_{\Omega}
  =
  (\textbf{v}, \textbf{f})_\Omega,
\end{eqnarray*}

      where the scalar product between two tensor-valued quantities is here defined as

\begin{eqnarray*}
  (\nabla \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Omega}
  =
  2 \int_\Omega \sum_{i,j=1}^d \frac{\partial v_j}{\partial x_i}
  \varepsilon(\textbf{u})_{ij} \ dx.
\end{eqnarray*}


      Using this, we have now reduced the requirements on our variables to first derivatives for $\mathbf u,\mathbf v$ and no derivatives at all for $p,q$.


      Because the scalar product between a general tensor like $\nabla\textbf{v}$ and a symmetric tensor like $\varepsilon(\textbf{u})$ equals the scalar product between the symmetrized forms of the two, we can also write the bilinear form above as follows:

\begin{eqnarray*}
  (\varepsilon(\textbf{v}), 2\; \varepsilon(\textbf{u}))_{\Omega}
  -
  (\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\partial\Omega}
  - (\textrm{div}\; \textbf{v}, p)_{\Omega}
  + (\textbf{n}\cdot\textbf{v}, p)_{\partial\Omega}
  -
  (q,\textrm{div}\; \textbf{u})_{\Omega}
  =
  (\textbf{v}, \textbf{f})_\Omega,
\end{eqnarray*}

      We will deal with the boundary terms in the next section, but it is already clear from the domain terms

\begin{eqnarray*}
  (\varepsilon(\textbf{v}), 2\; \varepsilon(\textbf{u}))_{\Omega}
  - (\textrm{div}\; \textbf{v}, p)_{\Omega}
  -
  (q,\textrm{div}\; \textbf{u})_{\Omega}
\end{eqnarray*}

      of the bilinear form that the Stokes equations yield a symmetric bilinear form, and consequently a symmetric (if indefinite) system matrix.
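In deal.II, this bilinear form maps quite directly onto an assembly loop. The following is a minimal sketch (not the tutorial's complete assembly function) of how the domain terms could be accumulated into a cell matrix; names such as fe_values, cell_matrix, dofs_per_cell and n_q_points are assumed to come from the usual tutorial setup of an FEValues object for a vector-valued element:

  const FEValuesExtractors::Vector velocities(0);
  const FEValuesExtractors::Scalar pressure(dim);

  for (unsigned int q = 0; q < n_q_points; ++q)
    for (unsigned int i = 0; i < dofs_per_cell; ++i)
      {
        // Symmetrized gradient, divergence, and pressure value of shape
        // function i at quadrature point q:
        const SymmetricTensor<2, dim> eps_v =
          fe_values[velocities].symmetric_gradient(i, q);
        const double div_v = fe_values[velocities].divergence(i, q);
        const double q_i   = fe_values[pressure].value(i, q);

        for (unsigned int j = 0; j < dofs_per_cell; ++j)
          cell_matrix(i, j) +=
            (2 * (eps_v * fe_values[velocities].symmetric_gradient(j, q))
             - div_v * fe_values[pressure].value(j, q)
             - q_i * fe_values[velocities].divergence(j, q)) *
            fe_values.JxW(q);
      }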

      Boundary conditions

      Note
      The material presented here is also discussed in video lecture 21.5. (All video lectures are also available here.) (See also video lecture 21.55, video lecture 21.6, video lecture 21.65.)

      The weak form just derived immediately presents us with different possibilities for imposing boundary conditions:

      1. Dirichlet velocity boundary conditions: On a part $\Gamma_D\subset\partial\Omega$ we may impose Dirichlet conditions on the velocity $\textbf u$:

    \begin{eqnarray*}
        \textbf u = \textbf g_D \qquad\qquad \textrm{on}\ \Gamma_D.
    \end{eqnarray*}


        Because test functions $\textbf{v}$ come from the tangent space of the solution variable, we have that $\textbf{v}=0$ on $\Gamma_D$ and consequently that

    \begin{eqnarray*}
      -(\textbf{n} \otimes \mathrm
        v, 2\; \varepsilon(\textbf{u}))_{\Gamma_D}
      +
      (\textbf{n}\cdot\textbf{v}, p)_{\Gamma_D}
      = 0.
    \end{eqnarray*}

        In other words, as usual, strongly imposed boundary values do not appear in the weak form.

        It is noteworthy that if we impose Dirichlet boundary values on the entire boundary, then the pressure is only determined up to a constant. An algorithmic realization of that would use similar tools as have been seen in step-11.

      2. Neumann-type or natural boundary conditions: On the rest of the boundary $\Gamma_N=\partial\Omega\backslash\Gamma_D$, let us rewrite the boundary terms as follows:

    \begin{eqnarray*}
      -(\textbf{n} \otimes \mathrm
        v, 2\; \varepsilon(\textbf{u}))_{\Gamma_N}
      +
      (\textbf{n}\cdot\textbf{v}, p)_{\Gamma_N}
      &=&
      (\textbf{v},
       \textbf{n}\cdot [p \textbf{I} - 2\; \varepsilon(\textbf{u})])_{\Gamma_N}.
    \end{eqnarray*}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html	2024-01-30 03:04:51.476873284 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html	2024-01-30 03:04:51.476873284 +0000

        Introduction

        Note
        The material presented here is also discussed in video lecture 28. (All video lectures are also available here.)

        This is the first of a number of tutorial programs that will finally cover "real" time-dependent problems, not the slightly odd form of time dependence found in step-18 or the DAE model of step-21. In particular, this program introduces the wave equation in a bounded domain. Later, step-24 will consider an example of absorbing boundary conditions, and step-25 a kind of nonlinear wave equation producing solutions called solitons.


        The wave equation in its prototypical form reads as follows: find $u(x,t), x\in\Omega, t\in[0,T]$ that satisfies

\begin{eqnarray*}
        \frac{\partial^2 u}{\partial t^2}
        -
        \Delta u &=& f
        \qquad
        \textrm{in}\ \Omega\times [0,T],
        \\
        u(x,t) &=& g
        \qquad
        \textrm{on}\ \partial\Omega\times [0,T],
        \\
        u(x,0) &=& u_0(x)
        \qquad
        \textrm{in}\ \Omega,
        \\
        \frac{\partial u(x,0)}{\partial t} &=& u_1(x)
        \qquad
        \textrm{in}\ \Omega.
\end{eqnarray*}

        Note that since this is an equation with second-order time derivatives, we need to pose two initial conditions, one for the value and one for the time derivative of the solution.


        Physically, the equation describes the motion of an elastic medium. In 2-d, one can think of how a membrane moves if subjected to a force. The Dirichlet boundary conditions above indicate that the membrane is clamped at the boundary at a height $g(x,t)$ (this height might be moving as well — think of people holding a blanket and shaking it up and down). The first initial condition equals the initial deflection of the membrane, whereas the second one gives its velocity. For example, one could think of pushing the membrane down with a finger and then letting it go at $t=0$ (nonzero deflection but zero initial velocity), or hitting it with a hammer at $t=0$ (zero deflection but nonzero velocity). Both cases would induce motion in the membrane.


        Time discretization

        Method of lines or Rothe's method?

        There is a long-standing debate in the numerical analysis community over whether a discretization of time dependent equations should involve first discretizing the time variable leading to a stationary PDE at each time step that is then solved using standard finite element techniques (this is called the Rothe method), or whether one should first discretize the spatial variables, leading to a large system of ordinary differential equations that can then be handled by one of the usual ODE solvers (this is called the method of lines).


        Rothe's method!

        Given these considerations, here is how we will proceed: let us first define a simple time stepping method for this second order problem, and then in a second step do the spatial discretization, i.e. we will follow Rothe's approach.

        For the first step, let us take a little detour first: in order to discretize a second time derivative, we can either discretize it directly, or we can introduce an additional variable and transform the system into a first order system. In many cases, this turns out to be equivalent, but dealing with first order systems is often simpler. To this end, let us introduce

\[
        v = \frac{\partial u}{\partial t},
\]

        and call this variable the velocity for obvious reasons. We can then reformulate the original wave equation as follows:

\begin{eqnarray*}
        \frac{\partial u}{\partial t}
        -
        v
        &=& 0
        \qquad
        \textrm{in}\ \Omega\times [0,T],
        \\
        \frac{\partial v}{\partial t}
        -
        \Delta u &=& f
        \qquad
        \textrm{in}\ \Omega\times [0,T],
        \\
        u(x,t) &=& g
        \qquad
        \textrm{on}\ \partial\Omega\times [0,T],
        \\
        u(x,0) &=& u_0(x)
        \qquad
        \textrm{in}\ \Omega,
        \\
        v(x,0) &=& u_1(x)
        \qquad
        \textrm{in}\ \Omega.
\end{eqnarray*}


The advantage of this formulation is that it now only contains first time derivatives for both variables, for which it is simple to write down time stepping schemes. Note that we do not have boundary conditions for $v$ at first. However, we could enforce $v=\frac{\partial g}{\partial t}$ on the boundary. It turns out in numerical examples that this is actually necessary: without doing so the solution doesn't look particularly wrong, but the Crank-Nicolson scheme does not conserve energy if one doesn't enforce these boundary conditions.


        With this formulation, let us introduce the following time discretization where a superscript $n$ indicates the number of a time step and $k=t_n-t_{n-1}$ is the length of the present time step:

\begin{eqnarray*}
  \frac{u^n - u^{n-1}}{k}
  - \left[\theta v^n + (1-\theta) v^{n-1}\right] &=& 0,
  \\
  \frac{v^n - v^{n-1}}{k}
  - \Delta\left[\theta u^n + (1-\theta) u^{n-1}\right]
  &=& \theta f^n + (1-\theta) f^{n-1}.
\end{eqnarray*}


        Note how we introduced a parameter $\theta$ here. If we chose $\theta=0$, for example, the first equation would reduce to $\frac{u^n - u^{n-1}}{k}  - v^{n-1} = 0$, which is well-known as the forward or explicit Euler method. On the other hand, if we set $\theta=1$, then we would get $\frac{u^n - u^{n-1}}{k}  - v^n = 0$, which corresponds to the backward or implicit Euler method. Both these methods are first order accurate methods. They are simple to implement, but they are not really very accurate.


The third case would be to choose $\theta=\frac 12$. The first of the equations above would then read $\frac{u^n - u^{n-1}}{k} - \frac 12 \left[v^n + v^{n-1}\right] = 0$. This method is known as the Crank-Nicolson method and has the advantage that it is second order accurate. In addition, it has the nice property that it preserves the energy in the solution (physically, the energy is the sum of the kinetic energy of the particles in the membrane plus the potential energy present due to the fact that it is locally stretched; this quantity is a conserved one in the continuous equation, but most time stepping schemes do not conserve it after time discretization). Since $v^n$ also appears in the equation for $u^n$, the Crank-Nicolson scheme is also implicit.


        In the program, we will leave $\theta$ as a parameter, so that it will be easy to play with it. The results section will show some numerical evidence comparing the different schemes.


        The equations above (called the semidiscretized equations because we have only discretized the time, but not space), can be simplified a bit by eliminating $v^n$ from the first equation and rearranging terms. We then get

\begin{eqnarray*}
  \left[ 1-k^2\theta^2\Delta \right] u^n &=&
         \left[ 1+k^2\theta(1-\theta)\Delta\right] u^{n-1} + k v^{n-1}
         + k^2\theta\left[\theta f^n + (1-\theta) f^{n-1}\right],\\
   v^n &=& v^{n-1} + k\Delta\left[ \theta u^n + (1-\theta) u^{n-1}\right]
   + k\left[\theta f^n + (1-\theta) f^{n-1}\right].
\end{eqnarray*}


        In this form, we see that if we are given the solution $u^{n-1},v^{n-1}$ of the previous timestep, that we can then solve for the variables $u^n,v^n$ separately, i.e. one at a time. This is convenient. In addition, we recognize that the operator in the first equation is positive definite, and the second equation looks particularly simple.


        Space discretization


        We have now derived equations that relate the approximate (semi-discrete) solution $u^n(x)$ and its time derivative $v^n(x)$ at time $t_n$ with the solutions $u^{n-1}(x),v^{n-1}(x)$ of the previous time step at $t_{n-1}$. The next step is to also discretize the spatial variable using the usual finite element methodology. To this end, we multiply each equation with a test function, integrate over the entire domain, and integrate by parts where necessary. This leads to

\begin{eqnarray*}
  (u^n,\varphi) + k^2\theta^2(\nabla u^n,\nabla \varphi) &=&
  (u^{n-1},\varphi) - k^2\theta(1-\theta)(\nabla u^{n-1},\nabla \varphi)
  +
  k(v^{n-1},\varphi)
  + k^2\theta
  \left[
  \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
  \right],
  \\
  (v^n,\varphi) &=& (v^{n-1},\varphi)
  -
  k\left[ \theta (\nabla u^n,\nabla\varphi)
  + (1-\theta) (\nabla u^{n-1},\nabla \varphi)\right]
  + k
  \left[
  \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
  \right].
\end{eqnarray*}


It is then customary to approximate $u^n(x) \approx u^n_h(x) = \sum_i U_i^n\phi_i^n(x)$, where $\phi_i^n(x)$ are the shape functions used for the discretization of the $n$-th time step and $U_i^n$ are the unknown nodal values of the solution. Similarly, $v^n(x) \approx v^n_h(x) = \sum_i V_i^n\phi_i^n(x)$. Finally, we have the solutions of the previous time step, $u^{n-1}(x) \approx u^{n-1}_h(x) = \sum_i U_i^{n-1}\phi_i^{n-1}(x)$ and $v^{n-1}(x) \approx v^{n-1}_h(x) = \sum_i V_i^{n-1}\phi_i^{n-1}(x)$. Note that since the solution of the previous time step has already been computed by the time we get to time step $n$, $U^{n-1},V^{n-1}$ are known. Furthermore, note that the solutions of the previous step may have been computed on a different mesh, so we have to use shape functions $\phi^{n-1}_i(x)$.


If we plug these expansions into the above equations and test with the test functions from the present mesh, we get the following linear system:

\begin{eqnarray*}
  (M^n + k^2\theta^2 A^n)U^n &=&
  M^{n,n-1}U^{n-1} - k^2\theta(1-\theta) A^{n,n-1}U^{n-1}
  +
  kM^{n,n-1}V^{n-1}
  + k^2\theta
  \left[
  \theta F^n + (1-\theta) F^{n-1}
  \right],
  \\
  M^nV^n &=&
  M^{n,n-1}V^{n-1}
  -
  k\left[ \theta A^n U^n
  + (1-\theta) A^{n,n-1} U^{n-1}\right]
  + k
  \left[
  \theta F^n + (1-\theta) F^{n-1}
  \right],
\end{eqnarray*}

        where

\begin{eqnarray*}
        M^n_{ij} &=& (\phi_i^n, \phi_j^n),
        \\
        A^n_{ij} &=& (\nabla\phi_i^n, \nabla\phi_j^n),
        \\
        M^{n,n-1}_{ij} &=& (\phi_i^n, \phi_j^{n-1}),
        \\
        A^{n,n-1}_{ij} &=& (\nabla\phi_i^n, \nabla\phi_j^{n-1}),
        \\
        F^n_{i} &=& (f^n,\phi_i^n),
        \\
        F^{n-1}_{i} &=& (f^{n-1},\phi_i^n).
\end{eqnarray*}

        If we solve these two equations, we can move the solution one step forward and go on to the next time step.
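For the case of a fixed mesh discussed below (where $M^{n,n-1}=M$ and $A^{n,n-1}=A$), one time step then boils down to building a right hand side from the old solution and solving two linear systems. The following is a hedged sketch of the update for $U^n$, assuming matrices mass_matrix ($M$) and laplace_matrix ($A$), a scratch matrix matrix_u, and the usual tutorial vectors; the forcing terms are omitted since $f=0$ in the test case below:

  Vector<double> tmp(solution_u.size());

  // Right hand side: M U^{n-1} + k M V^{n-1} - k^2 theta (1-theta) A U^{n-1}
  mass_matrix.vmult(system_rhs, old_solution_u);
  mass_matrix.vmult(tmp, old_solution_v);
  system_rhs.add(time_step, tmp);
  laplace_matrix.vmult(tmp, old_solution_u);
  system_rhs.add(-time_step * time_step * theta * (1 - theta), tmp);

  // System matrix: M + k^2 theta^2 A; then solve for U^n, e.g. with CG
  matrix_u.copy_from(mass_matrix);
  matrix_u.add(time_step * time_step * theta * theta, laplace_matrix);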


        It is worth noting that if we choose the same mesh on each time step (as we will in fact do in the program below), then we have the same shape functions on time step $n$ and $n-1$, i.e. $\phi^n_i=\phi_i^{n-1}=\phi_i$. Consequently, we get $M^n=M^{n,n-1}=M$ and $A^n=A^{n,n-1}=A$. On the other hand, if we had used different shape functions, then we would have to compute integrals that contain shape functions defined on two meshes. This is a somewhat messy process that we omit here, but that is treated in some detail in step-28.


        Under these conditions (i.e. a mesh that doesn't change), one can optimize the solution procedure a bit by basically eliminating the solution of the second linear system. We will discuss this in the introduction of the step-25 program.

        Energy conservation


        One way to compare the quality of a time stepping scheme is to see whether the numerical approximation preserves conservation properties of the continuous equation. For the wave equation, the natural quantity to look at is the energy. By multiplying the wave equation by $u_t$, integrating over $\Omega$, and integrating by parts where necessary, we find that

\[
        \frac{d}{d t}
        \left[\frac 12 \int_\Omega \left(\frac{\partial u}{\partial
        t}\right)^2 + (\nabla u)^2 \; dx\right]
        =
        \int_\Omega f \frac{\partial u}{\partial t} \; dx
        +
        \int_{\partial\Omega} n\cdot\nabla u
        \frac{\partial g}{\partial t} \; dx.
\]

By consequence, in the absence of body forces and with constant boundary values, we get that

\[
        E(t) = \frac 12 \int_\Omega \left(\frac{\partial u}{\partial
        t}\right)^2 + (\nabla u)^2 \; dx
\]


        is a conserved quantity, i.e. one that doesn't change with time. We will compute this quantity after each time step. It is straightforward to see that if we replace $u$ by its finite element approximation, and $\frac{\partial u}{\partial t}$ by the finite element approximation of the velocity $v$, then

\[
        E(t_n) = \frac 12 \left<V^n, M^n V^n\right>
        +
        \frac 12 \left<U^n, A^n U^n\right>.
\]

        As we will see in the results section, the Crank-Nicolson scheme does indeed conserve the energy, whereas neither the forward nor the backward Euler scheme do.
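In code, this quantity is cheap to evaluate once the mass and Laplace matrices are available. A minimal sketch (an illustration, not the tutorial's verbatim code), assuming the usual mass_matrix, laplace_matrix and solution vectors:

  Vector<double> tmp(solution_u.size());

  mass_matrix.vmult(tmp, solution_v);
  double energy = 0.5 * (solution_v * tmp); // 1/2 <V, M V>

  laplace_matrix.vmult(tmp, solution_u);
  energy += 0.5 * (solution_u * tmp);       // 1/2 <U, A U>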

        Who are Courant, Friedrichs, and Lewy?


        One of the reasons why the wave equation is not easy to solve numerically is that explicit time discretizations are only stable if the time step is small enough. In particular, it is coupled to the spatial mesh width $h$. For the lowest order discretization we use here, the relationship reads

\[
        k\le \frac hc
\]


        where $c$ is the wave speed, which in our formulation of the wave equation has been normalized to one. Consequently, unless we use the implicit schemes with $\theta>0$, our solutions will not be numerically stable if we violate this restriction. Implicit schemes do not have this restriction for stability, but they become inaccurate if the time step is too large.


        This condition was first recognized by Courant, Friedrichs, and Lewy — in 1928, long before computers became available for numerical computations! (This result appeared in the German language article R. Courant, K. Friedrichs and H. Lewy: Über die partiellen Differenzengleichungen der mathematischen Physik, Mathematische Annalen, vol. 100, no. 1, pages 32-74, 1928.) This condition on the time step is most frequently just referred to as the CFL condition. Intuitively, the CFL condition says that the time step must not be larger than the time it takes a wave to cross a single cell.


        In the program, we will refine the square $[-1,1]^2$ seven times uniformly, giving a mesh size of $h=\frac 1{64}$, which is what we set the time step to. The fact that we set the time step and mesh size individually in two different places is error prone: it is too easy to refine the mesh once more but forget to also adjust the time step. step-24 shows a better way how to keep these things in sync.
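One way to keep them in sync (a sketch in the spirit of what step-24 does, not necessarily its exact formula) is to derive the time step from the mesh rather than hard-coding it:

  const double wave_speed = 1.0; // normalized to one in our formulation
  const double time_step =
    GridTools::minimal_cell_diameter(triangulation) / wave_speed;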


        The test case


        Although the program has all the hooks to deal with nonzero initial and boundary conditions and body forces, we take a simple case where the domain is a square $[-1,1]^2$ and

\begin{eqnarray*}
        f &=& 0,
        \\
        u_0 &=& 0,
        \\
        u_1 &=& 0,
        \\
        g &=& \left\{\begin{matrix}\sin (4\pi t)
        &\qquad& t\le \frac 12, x=-1, -\frac 13<y<\frac 13
        \\
         0
        &&\textrm{otherwise}
        \end{matrix}
        \right.
\end{eqnarray*}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html	2024-01-30 03:04:51.520873651 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html	2024-01-30 03:04:51.520873651 +0000

        The problem

        The temperature at a given location, neglecting thermal diffusion, can be stated as

\[
 \rho C_p \frac{\partial}{\partial t}T(t,\mathbf r) = H(t,\mathbf r)
\]


        Here $\rho (\mathbf r) $ is the density; $C_p (\mathbf r) $ is the specific heat; $\frac{\partial T}{\partial t}(t,\mathbf r)$ is the temperature rise due to the delivered microwave energy; and $H(t,\mathbf r)$ is the heating function defined as the thermal energy per time and volume transformed from deposited microwave energy.


        Let us assume that tissues have heterogeneous dielectric properties but homogeneous acoustic properties. The basic acoustic generation equation in an acoustically homogeneous medium can be described as follows: if $u$ is the vector-valued displacement, then tissue certainly reacts to changes in pressure by acceleration:

\[
 \rho \frac{\partial^2}{\partial t^2}u(t,\mathbf r) =
 -\nabla p(t,\mathbf r).
\]

        Furthermore, it contracts due to excess pressure and expands based on changes in temperature:

\[
 \nabla \cdot u(t,\mathbf r) = -\frac{p(t,\mathbf r)}{\rho c_0^2}+\beta T(t,\mathbf r) .
\]

        Here, $\beta$ is a thermoexpansion coefficient.


Let us now make the assumption that heating only happens on a time scale much shorter than wave propagation through tissue (i.e. the temporal length of the microwave pulse that heats the tissue is much shorter than the time it takes a wave to cross the domain). In that case, the heating rate $H(t,\mathbf r)$ can be written as $H(t,\mathbf r) = a(\mathbf r)\delta(t)$ (where $a(\mathbf r)$ is a map of absorption strengths for microwave energy and $\delta(t)$ is the Dirac delta function), which together with the first equation above will yield an instantaneous jump in the temperature $T(\mathbf r)$ at time $t=0$. Using this assumption, and taking all equations together, we can rewrite and combine the above as follows:

\[
 \Delta p-\frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2} = \lambda
 a(\mathbf r)\frac{d\delta(t)}{dt}
\]


        where $\lambda = - \frac{\beta}{C_p}$.


        This somewhat strange equation with the derivative of a Dirac delta function on the right hand side can be rewritten as an initial value problem as follows:

\begin{eqnarray*}
 \Delta \bar{p}- \frac{1}{c_0^2} \frac{\partial^2 \bar{p}}{\partial t^2} & = &
 0 \\
 \bar{p}(0,\mathbf r) &=& c_0^2 \lambda a(\mathbf r) = b(\mathbf r)  \\
 \frac{\partial\bar{p}(0,\mathbf r)}{\partial t} &=& 0.
\end{eqnarray*}

        (A derivation of this transformation into an initial value problem is given at the end of this introduction as an appendix.)


        In the inverse problem, it is the initial condition $b(\mathbf r) = c_0^2 \lambda a(\mathbf r)$ that one would like to recover, since it is a map of absorption strengths for microwave energy, and therefore presumably an indicator to discern healthy from diseased tissue.


In real applications, the thermoacoustic source is very small compared to the medium. The propagation path of the thermoacoustic waves can then be approximated as going from the source to infinity. Furthermore, detectors are only a limited distance from the source. One only needs to evaluate the values when the thermoacoustic waves pass through the detectors, although they do continue beyond. This is therefore a problem where we are only interested in a small part of an infinite medium, and we do not want waves generated somewhere to be reflected at the boundary of the domain which we consider interesting. Rather, we would like to simulate only that part of the wave field that is contained inside the domain of interest, and waves that hit the boundary of that domain to simply pass undisturbed through the boundary. In other words, we would like the boundary to absorb any waves that hit it.

        In general, this is a hard problem: Good absorbing boundary conditions are nonlinear and/or numerically very expensive. We therefore opt for a simple first order approximation to absorbing boundary conditions that reads

\[
 \frac{\partial\bar{p}}{\partial\mathbf n} =
 -\frac{1}{c_0} \frac{\partial\bar{p}}{\partial t}
\]


        Here, $\frac{\partial\bar{p}}{\partial\mathbf n}$ is the normal derivative at the boundary. It should be noted that this is not a particularly good boundary condition, but it is one of the very few that are reasonably simple to implement.


        Weak form and discretization

        As in step-23, one first introduces a second variable, which is defined as the derivative of the pressure potential:

\[
 v = \frac{\partial\bar{p}}{\partial t}
\]

        With the second variable, one then transforms the forward problem into two separate equations:

\begin{eqnarray*}
 \bar{p}_{t} - v & = & 0 \\
 \Delta\bar{p} - \frac{1}{c_0^2}\,v_{t} & = & f
\end{eqnarray*}

        with initial conditions:

\begin{eqnarray*}
 \bar{p}(0,\mathbf r) & = & b(r) \\
 v(0,\mathbf r)=\bar{p}_t(0,\mathbf r) & = & 0.
\end{eqnarray*}


        Note that we have introduced a right hand side $f(t,\mathbf r)$ here to show how to derive these formulas in the general case, although in the application to the thermoacoustic problem $f=0$.


        The semi-discretized, weak version of this model, using the general $\theta$ scheme introduced in step-23 is then:

\begin{eqnarray*}
 \left(\frac{\bar{p}^n-\bar{p}^{n-1}}{k},\phi\right)_\Omega-
 \left(\theta v^{n}+(1-\theta)v^{n-1},\phi\right)_\Omega & = & 0   \\
 -\left(\nabla((\theta\bar{p}^n+(1-\theta)\bar{p}^{n-1})),\nabla\phi\right)_\Omega-
 \frac{1}{c_0}\left(\frac{\bar{p}^n-\bar{p}^{n-1}}{k},\phi\right)_{\partial\Omega} -
 \frac{1}{c_0^2}\left(\frac{v^n-v^{n-1}}{k},\phi\right)_\Omega & =
 & \left(\theta f^{n}+(1-\theta)f^{n-1}, \phi\right)_\Omega,
\end{eqnarray*}


        where $\phi$ is an arbitrary test function, and where we have used the absorbing boundary condition to integrate by parts: absorbing boundary conditions are incorporated into the weak form by using

\[
 \int_\Omega\varphi \, \Delta p\; dx =
 -\int_\Omega\nabla \varphi \cdot \nabla p \; dx +
 \int_{\partial\Omega}\varphi \frac{\partial p}{\partial {\mathbf n}}\; ds.
\]

        From this we obtain the discrete model by introducing a finite number of shape functions, and get

\begin{eqnarray*}
 M\bar{p}^{n}-k \theta M v^n & = & M\bar{p}^{n-1}+k (1-\theta)Mv^{n-1},\\
 (-c_0^2k \theta A-c_0 B)\bar{p}^n-Mv^{n} & = &
 (c_0^2k(1-\theta)A-c_0B)\bar{p}^{n-1}-Mv^{n-1}+c_0^2k(\theta F^{n}+(1-\theta)F^{n-1}).
\end{eqnarray*}


        The matrices $M$ and $A$ are here as in step-23, and the boundary mass matrix

\[
        B_{ij} = \left(\varphi_i,\varphi_j\right)_{\partial\Omega}
\]

        results from the use of absorbing boundary conditions.
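Assembling such a boundary matrix uses FEFaceValues instead of FEValues. The following is a minimal sketch under the usual tutorial assumptions (an fe_face_values object initialized with a face quadrature rule, plus cell_matrix, dofs_per_cell and n_face_q_points); distributing the local contributions into the global matrix is elided:

  for (const auto &cell : dof_handler.active_cell_iterators())
    for (const unsigned int f : cell->face_indices())
      if (cell->face(f)->at_boundary())
        {
          fe_face_values.reinit(cell, f);
          cell_matrix = 0;
          for (unsigned int q = 0; q < n_face_q_points; ++q)
            for (unsigned int i = 0; i < dofs_per_cell; ++i)
              for (unsigned int j = 0; j < dofs_per_cell; ++j)
                cell_matrix(i, j) += fe_face_values.shape_value(i, q) *
                                     fe_face_values.shape_value(j, q) *
                                     fe_face_values.JxW(q);
          // ...copy cell_matrix into the global boundary matrix B...
        }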

The above two equations can be rewritten in matrix form with the pressure and its derivative as an unknown vector:

\[
 \left(\begin{array}{cc}
  M         &       -k\theta M \\
 c_0^2\,k\,\theta\,A+c_0\,B  &  M   \\
                \end{array} \right)\left(\begin{array}{c}
 \bar{p}^{n}    \\
 v^{n}
                \end{array}\right)=\left(\begin{array}{l}
  G_1  \\
  G_2 -(\theta F^{n}+(1-\theta)F ^{n-1})c_{0}^{2}k \\
                \end{array}\right)
\]

        where

\[
 \left(\begin{array}{c}
 G_1 \\
 G_2 \\
                \end{array} \right)=\left(\begin{array}{l}
  M\bar{p}^{n-1}+k(1-\theta)Mv^{n-1}\\
  (-c_{0}^{2}k (1-\theta)A+c_0 B)\bar{p}^{n-1} +Mv^{n-1}
                \end{array}\right)
\]

        By simple transformations, one then obtains two equations for the pressure potential and its derivative, just as in the previous tutorial program:

\begin{eqnarray*}
 (M+(k\,\theta\,c_{0})^{2}A+c_0k\theta B)\bar{p}^{n} & = &
 G_{1}+(k\, \theta)G_{2}-(c_0k)^2\theta (\theta F^{n}+(1-\theta)F^{n-1}) \\
 Mv^n & = & -(c_0^2\,k\, \theta\, A+c_0B)\bar{p}^{n}+ G_2 -
 c_0^2k(\theta F^{n}+(1-\theta)F^{n-1})
\end{eqnarray*}

        What the program does

Compared to step-23, this program adds the treatment of simple absorbing boundary conditions. In addition, it deals with data obtained from actual experimental measurements. To this end, we need to evaluate the solution at points at which the experiment also evaluates a real pressure field. We will see how to do that using the VectorTools::point_value function further down below.
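As a taste of what that looks like, here is a hedged one-liner; the detector coordinates are made up for illustration, while dof_handler and solution are the usual members:

  const Point<2> detector_location(0.0, 0.5); // hypothetical detector position
  const double detector_pressure =
    VectorTools::point_value(dof_handler, solution, detector_location);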

        Appendix: PDEs with Dirac delta functions as right hand side and their transformation to an initial value problem

        In the derivation of the initial value problem for the wave equation, we initially found that the equation had the derivative of a Dirac delta function as a right hand side:

\[
 \Delta p-\frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2} = \lambda
 a(\mathbf r)\frac{d\delta(t)}{dt}.
\]


In order to see how to transform this single equation into the usual statement of a PDE with initial conditions, let us make the assumption that the physically quite reasonable medium is at rest initially, i.e. $p(t,\mathbf r)=\frac{\partial p(t,\mathbf r)}{\partial t}=0$ for $t<0$. Next, let us form the indefinite integral with respect to time of both sides:

\[
 \int^t \Delta p\; dt -\int^t \frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2}
 \; dt
 =
 \int^t \lambda a(\mathbf r)\frac{d\delta(t)}{dt} \;dt.
\]

        This immediately leads to the statement

\[
 P(t,\mathbf r) - \frac{1}{c_0^2} \frac{\partial p}{\partial t}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html	2024-01-30 03:04:51.572874084 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html	2024-01-30 03:04:51.572874084 +0000

        Discretization of the equations in time


        Now, we can discretize the split formulation in time using the $\theta$-method, which has a stencil of only two time steps. By choosing a $\theta\in [0,1]$, the latter discretization allows us to choose from a continuum of schemes. In particular, if we pick $\theta=0$ or $\theta=1$, we obtain the first-order accurate explicit or implicit Euler method, respectively. Another important choice is $\theta=\frac{1}{2}$, which gives the second-order accurate Crank-Nicolson scheme. Henceforth, a superscript $n$ denotes the values of the variables at the $n^{\mathrm{th}}$ time step, i.e. at $t=t_n \dealcoloneq n k$, where $k$ is the (fixed) time step size. Thus, the split formulation of the time-discretized sine-Gordon equation becomes


        \begin{eqnarray*}
   \frac{u^n - u^{n-1}}{k} - \left[\theta v^n + (1-\theta) v^{n-1}\right] &=& 0,\\
   \frac{v^n - v^{n-1}}{k} - \Delta\left[\theta u^n + (1-\theta) u^{n-1}\right]
   &=& -\sin\left[\theta u^n + (1-\theta) u^{n-1}\right].
 \end{eqnarray*}


        We can simplify the latter via a bit of algebra. Eliminating $v^n$ from the first equation and rearranging, we obtain


\begin{eqnarray*}
  \left[ 1-k^2\theta^2\Delta \right] u^n &=&
         \left[ 1+k^2\theta(1-\theta)\Delta\right] u^{n-1} + k v^{n-1}
         - k^2\theta\sin\left[ \theta u^n + (1-\theta) u^{n-1} \right],
  \\
   v^n &=& v^{n-1} + k\Delta\left[ \theta u^n + (1-\theta) u^{n-1}\right]
         - k\sin\left[ \theta u^n + (1-\theta) u^{n-1} \right].
\end{eqnarray*}


        It may seem as though we can just proceed to discretize the equations in space at this point. While this is true for the second equation (which is linear in $v^n$), this would not work for all $\theta$ since the first equation above is nonlinear. Therefore, a nonlinear solver must be implemented, then the equations can be discretized in space and solved.


        To this end, we can use Newton's method. Given the nonlinear equation $F(u^n) = 0$, we produce successive approximations to $u^n$ as follows:


\begin{eqnarray*}
  \mbox{ Find } \delta u^n_l \mbox{ s.t. } F'(u^n_l)\delta u^n_l = -F(u^n_l)
  \mbox{, set }  u^n_{l+1} = u^n_l + \delta u^n_l.
\end{eqnarray*}

Notice that while $F(u^n_l)$ is a function, $F'(u^n_l)$ is an operator.
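In code, such an iteration typically looks like the following sketch; the helper names compute_residual and solve_linearized are placeholders for illustration, not functions of this tutorial:

  unsigned int newton_step = 0;
  double       update_norm = 0;
  do
    {
      compute_residual(residual); // assembles -F(u^n_l)        (placeholder)
      solve_linearized(update);   // solves F'(u^n_l) du = -F   (placeholder)
      solution += update;         // u^n_{l+1} = u^n_l + du
      update_norm = update.l2_norm();
    }
  while (update_norm > 1e-12 && ++newton_step < 20);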

        Weak formulation of the time-discretized equations


        With hindsight, we choose both the solution and the test space to be $H^1(\Omega)$. Hence, multiplying by a test function $\varphi$ and integrating, we obtain the following variational (or weak) formulation of the split formulation (including the nonlinear solver for the first equation) at each time step:


\begin{eqnarray*}
  &\mbox{ Find}& \delta u^n_l \in H^1(\Omega) \mbox{ s.t. }
  \left( F'(u^n_l)\delta u^n_l, \varphi \right)_{\Omega}
  = -\left( F(u^n_l), \varphi \right)_{\Omega}
  \;\forall\varphi\in H^1(\Omega),
  \mbox{ set } u^n_{l+1} = u^n_l + \delta u^n_l,\\
  &\mbox{ Find}& v^n \in H^1(\Omega) \mbox{ s.t. }
  \left( v^n, \varphi \right)_{\Omega}
  =
  \left( v^{n-1}, \varphi \right)_{\Omega}
  -
  k\left[ \theta \left( \nabla u^n, \nabla\varphi \right)_{\Omega}
  + (1-\theta) \left( \nabla u^{n-1}, \nabla\varphi \right)_{\Omega} \right]
  -
  k\left(\sin\left[ \theta u^n + (1-\theta) u^{n-1} \right],
         \varphi \right)_{\Omega} \;\forall\varphi\in H^1(\Omega).
\end{eqnarray*}


Note that we have used integration by parts and the zero Neumann boundary conditions on all terms involving the Laplacian operator. Moreover, $F(\cdot)$ and $F'(\cdot)$ are as defined above, and $(\cdot,\cdot)_{\Omega}$ denotes the usual $L^2$ inner product over the domain $\Omega$, i.e. $(f,g)_{\Omega} = \int_\Omega fg \,\mathrm{d}x$. Finally, notice that the first equation is, in fact, the definition of an iterative procedure, so it is solved multiple times during each time step until a stopping criterion is met.

        Discretization of the weak formulation in space


Using the Finite Element Method, we discretize the variational formulation in space. To this end, let $V_h$ be a finite-dimensional $H^1(\Omega)$-conforming finite element space ( $\mathrm{dim}\, V_h = N < \infty$) with nodal basis $\{\varphi_1,\ldots,\varphi_N\}$. Now, we can expand all functions in the weak formulation (see above) in terms of the nodal basis. Henceforth, we shall denote by a capital letter the vector of coefficients (in the nodal basis) of a function denoted by the same letter in lower case; e.g., $u^n = \sum_{i=1}^N U^n_i \varphi_i$ where $U^n \in {R}^N$ and $u^n \in H^1(\Omega)$. Thus, the finite-dimensional version of the variational formulation requires that we solve the following matrix equations at each time step:

\begin{eqnarray*}
  F_h'(U^n_l)\delta U^n_l &=& -F_h(U^n_l), \qquad
        U^n_{l+1} = U^n_l + \delta U^n_l, \\
  MV^n &=& MV^{n-1} - k\left[\theta A U^n + (1-\theta) A U^{n-1}\right]
           - kS(u^n,u^{n-1}),
\end{eqnarray*}

where the nonlinear residual and its Jacobian are given by

\begin{eqnarray*}
  F_h(U^n_l) &=& \left[ M+k^2\theta^2A \right] U^n_l -
                 \left[ M-k^2\theta(1-\theta)A \right] U^{n-1} - kMV^{n-1}
                 + k^2\theta S(u^n_l,u^{n-1}), \\
  F_h'(U^n_l) &=& M+k^2\theta^2A
                 + k^2\theta^2N(u^n_l,u^{n-1})
\end{eqnarray*}


Again, note that the first matrix equation above is, in fact, the definition of an iterative procedure, so it is solved multiple times until a stopping criterion is met. Moreover, $M$ is the mass matrix, i.e. $M_{ij} = \left( \varphi_i,\varphi_j \right)_{\Omega}$, $A$ is the Laplace matrix, i.e. $A_{ij} = \left( \nabla \varphi_i, \nabla \varphi_j \right)_{\Omega}$, $S$ is the nonlinear term in the equation that defines our auxiliary velocity variable, i.e. $S_j(f,g) = \left( \sin\left[ \theta f + (1-\theta) g\right], \varphi_j \right)_{\Omega}$, and $N$ is the nonlinear term in the Jacobian matrix of $F(\cdot)$, i.e. $N_{ij}(f,g) = \left( \cos\left[ \theta f + (1-\theta) g\right]\varphi_i, \varphi_j \right)_{\Omega}$.

        What solvers can we use for the first equation? Let's look at the matrix we have to invert:

\[
  (M+k^2\theta^2(A+N))_{ij} =
  \int_\Omega (1+k^2\theta^2\alpha)
  \varphi_i\varphi_j \; dx
  + k^2 \theta^2 \int_\Omega \nabla\varphi_i\nabla\varphi_j \; dx,
\]


        for some $\alpha$ that depends on the present and previous solution. First, note that the matrix is symmetric. In addition, if the time step $k$ is small enough, i.e. if $k\theta<1$, then the matrix is also going to be positive definite. In the program below, this will always be the case, so we will use the Conjugate Gradient method together with the SSOR method as preconditioner. We should keep in mind, however, that this will fail if we happen to use a bigger time step. Fortunately, in that case the solver will just throw an exception indicating a failure to converge, rather than silently producing a wrong result. If that happens, then we can simply replace the CG method by something that can handle indefinite symmetric systems. The GMRES solver is typically the standard method for all "bad" linear systems, but it is also a slow one. Possibly better would be a solver that utilizes the symmetry, such as, for example, SymmLQ, which is also implemented in deal.II.
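In deal.II terms, this solve could look like the following sketch (assuming the usual system_matrix, solution and system_rhs members; the tolerance and relaxation parameter are illustrative choices):

  SolverControl            solver_control(1000, 1e-12 * system_rhs.l2_norm());
  SolverCG<Vector<double>> cg(solver_control);

  PreconditionSSOR<SparseMatrix<double>> preconditioner;
  preconditioner.initialize(system_matrix, 1.2); // SSOR relaxation parameter

  // Throws an exception instead of silently failing if the matrix turns
  // out to be indefinite because the time step was too large:
  cg.solve(system_matrix, solution, system_rhs, preconditioner);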


        This program uses a clever optimization over step-23 and step-24: If you read the above formulas closely, it becomes clear that the velocity $V$ only ever appears in products with the mass matrix. In step-23 and step-24, we were, therefore, a bit wasteful: in each time step, we would solve a linear system with the mass matrix, only to multiply the solution of that system by $M$ again in the next time step. This can, of course, be avoided, and we do so in this program.


        The test case

        There are a few analytical solutions for the sine-Gordon equation, both in 1D and 2D. In particular, the program as is computes the solution to a problem with a single kink-like solitary wave initial condition. This solution is given by Leibbrandt in Phys. Rev. Lett. 41(7), and is implemented in the ExactSolution class.

It should be noted that this closed-form solution, strictly speaking, only holds for the infinite-space initial-value problem (not the Neumann initial-boundary-value problem under consideration here). However, given that we impose zero Neumann boundary conditions, we expect that the solution to our initial-boundary-value problem would be close to the solution of the infinite-space initial-value problem, if reflections of waves off the boundaries of our domain do not occur. In practice, this is of course not the case, but we can at least pretend that it is so.


        The constants $\vartheta$ and $\lambda$ in the 2D solution and $\vartheta$, $\phi$ and $\tau$ in the 3D solution are called the Bäcklund transformation parameters. They control such things as the orientation and steepness of the kink. For the purposes of testing the code against the exact solution, one should choose the parameters so that the kink is aligned with the grid.


        The solutions that we implement in the ExactSolution class are these:

        • In 1D:

@@ -263,7 +263,7 @@

          \[
     u(x,y,t) = 4 \arctan \left[a_0 e^{s\xi}\right],
   \]

          where $\xi$ is defined as

          \[
     \xi = x \cos\vartheta + \sin(\vartheta) (y\cosh\lambda + t\sinh \lambda),
   \]

@@ -277,7 +277,7 @@

          \[
     u(x,y,z,t) = 4 \arctan \left[c_0 e^{s\xi}\right],
   \]

          where $\xi$ is defined as

          \[
     \xi = x \cos\vartheta + y \sin \vartheta \cos\phi +
           \sin \vartheta \sin\phi (z\cosh\tau + t\sinh \tau),
   \]

@@ -329,7 +329,7 @@
 The entire algorithm for solving the problem is encapsulated in this class. As in previous example programs, the class is declared with a template parameter, which is the spatial dimension, so that we can solve the sine-Gordon equation in one, two or three spatial dimensions. For more on the dimension-independent class-encapsulation of the problem, the reader should consult step-3 and step-4.

          Compared to step-23 and step-24, there isn't anything newsworthy in the general structure of the program (though there is of course in the inner workings of the various functions!). The most notable difference is the presence of the two new functions compute_nl_term and compute_nl_matrix that compute the nonlinear contributions to the system matrix and right-hand side of the first equation, as discussed in the Introduction. In addition, we have to have a vector solution_update that contains the nonlinear update to the solution vector in each Newton step.

          As also mentioned in the introduction, we do not store the velocity variable in this program, but the mass matrix times the velocity. This is done in the M_x_velocity variable (the "x" is intended to stand for "times").

          Finally, the output_timestep_skip variable stores the number of time steps to be taken each time before graphical output is to be generated. This is of importance when using fine meshes (and consequently small time steps) where we would run lots of time steps and create lots of output files of solutions that look almost the same in subsequent files. This only clogs up our visualization procedures and we should avoid creating more output than we are really interested in. Therefore, if this variable is set to a value $n$ bigger than one, output is generated only every $n$th time step.

            template <int dim>
            class SineGordonProblem
            {
          @@ -473,8 +473,8 @@

          Implementation of the SineGordonProblem class

          Let's move on to the implementation of the main class, as it implements the algorithm outlined in the introduction.

          SineGordonProblem::SineGordonProblem

          This is the constructor of the SineGordonProblem class. It specifies the desired polynomial degree of the finite elements, associates a DoFHandler to the triangulation object (just as in the example programs step-3 and step-4), initializes the current or initial time, the final time, the time step size, and the value of $\theta$ for the time stepping scheme. Since the solutions we compute here are time-periodic, the actual value of the start-time doesn't matter, and we choose it so that we start at an interesting time.

          Note that if we were to choose the explicit Euler time stepping scheme ( $\theta = 0$), then we must pick a time step $k \le h$, otherwise the scheme is not stable and oscillations might arise in the solution. The Crank-Nicolson scheme ( $\theta = \frac{1}{2}$) and the implicit Euler scheme ( $\theta=1$) do not suffer from this deficiency, since they are unconditionally stable. However, even then the time step should be chosen to be on the order of $h$ in order to obtain a good solution. Since we know that our mesh results from the uniform subdivision of a rectangle, we can compute that time step easily; if we had a different domain, the technique in step-24 using GridTools::minimal_cell_diameter would work as well.
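          For reference, the mesh-dependent technique mentioned above is essentially a one-liner; a minimal sketch (assuming triangulation is the Triangulation member of the class):

            // Choose the time step on the order of the smallest cell
            // diameter, as the stability discussion above requires.
            time_step = GridTools::minimal_cell_diameter(triangulation);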

            template <int dim>
            SineGordonProblem<dim>::SineGordonProblem()
            : fe(1)
          @@ -489,7 +489,7 @@
           

          SineGordonProblem::make_grid_and_dofs

          This function creates a rectangular grid in dim dimensions and refines it several times. Also, all matrix and vector members of the SineGordonProblem class are initialized to their appropriate sizes once the degrees of freedom have been assembled. Like step-24, we use MatrixCreator functions to generate a mass matrix $M$ and a Laplace matrix $A$ and store them in the appropriate variables for the remainder of the program's life.
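          A minimal sketch of the two MatrixCreator calls mentioned above (assuming dof_handler, mass_matrix and laplace_matrix have already been initialized with a common sparsity pattern):

            MatrixCreator::create_mass_matrix(dof_handler,
                                              QGauss<dim>(fe.degree + 1),
                                              mass_matrix);
            MatrixCreator::create_laplace_matrix(dof_handler,
                                                 QGauss<dim>(fe.degree + 1),
                                                 laplace_matrix);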

            template <int dim>
            void SineGordonProblem<dim>::make_grid_and_dofs()
            {
          @@ -765,7 +765,7 @@
            << "advancing to t = " << time << '.' << std::endl;
           

          At the beginning of each time step we must solve the nonlinear equation in the split formulation via Newton's method — i.e. solve for $\delta U^{n,l}$ then compute $U^{n,l+1}$ and so on. The stopping criterion for this nonlinear iteration is that $\|F_h(U^{n,l})\|_2 \le 10^{-6} \|F_h(U^{n,0})\|_2$. Consequently, we need to record the norm of the residual in the first iteration.
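          Schematically, and leaving the assembly and solve steps aside, the nonlinear loop with this stopping criterion can be sketched like this (the names mirror the snippet below):

            do
              {
                // ... assemble system_rhs = F_h(U^{n,l}) and solve for the
                // Newton update ...
                if (first_iteration)
                  initial_rhs_norm = system_rhs.l2_norm();
                first_iteration = false;
              }
            while (system_rhs.l2_norm() > 1e-6 * initial_rhs_norm);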

          At the end of each iteration, we output to the console how many linear solver iterations it took us. When the loop below is done, we have (an approximation of) $U^n$.

            double initial_rhs_norm = 0.;
            bool first_iteration = true;
            do
          @@ -789,7 +789,7 @@
           
            std::cout << " CG iterations per nonlinear step." << std::endl;
           
      Upon obtaining the solution to the first equation of the problem at $t=t_n$, we must update the auxiliary velocity variable $V^n$. However, we do not compute and store $V^n$ since it is not a quantity we use directly in the problem. Hence, for simplicity, we update $MV^n$ directly:

  Vector<double> tmp_vector(solution.size());
  laplace_matrix.vmult(tmp_vector, solution);       // tmp_vector = A U^n
  M_x_velocity.add(-time_step * theta, tmp_vector); // MV -= k*theta*(A U^n)
      @@ -848,7 +848,7 @@
        return 0;
        }

      Results

      The explicit Euler time stepping scheme ( $\theta=0$) performs adequately for the problems we wish to solve. Unfortunately, a rather small time step has to be chosen due to stability issues — $k\sim h/10$ appears to work for most of the simulations we performed. On the other hand, the Crank-Nicolson scheme ( $\theta=\frac{1}{2}$) is unconditionally stable, and (at least for the case of the 1D breather) we can pick the time step to be as large as $25h$ without any ill effects on the solution. The implicit Euler scheme ( $\theta=1$) is "exponentially damped," so it is not a good choice for solving the sine-Gordon equation, which is conservative. However, some of the damped schemes in the continuum offered by the $\theta$-method were useful for eliminating spurious oscillations due to boundary effects.

      In the simulations below, we solve the sine-Gordon equation on the interval $\Omega = [-10,10]$ in 1D and on the square $\Omega = [-10,10]\times [-10,10]$ in 2D. In each case, the respective grid is refined uniformly 6 times, i.e. $h\sim 2^{-6}$.

@@ -858,7 +858,7 @@

      \[
   u_{\mathrm{breather}}(x,t) = -4\arctan \left(\frac{m}{\sqrt{1-m^2}} \frac{\sin\left(\sqrt{1-m^2}t +c_2\right)}{\cosh(mx+c_1)} \right),
\]

      where $c_1$, $c_2$ and $m<1$ are constants. In the simulation below, we have chosen $c_1=0$, $c_2=0$, $m=0.5$. Moreover, it is known that the period of oscillation of the breather is $2\pi\sqrt{1-m^2}$, hence we have chosen $t_0=-5.4414$ and $t_f=2.7207$ so that we can observe three oscillations of the solution. Then, taking $u_0(x) = u_{\mathrm{breather}}(x,t_0)$, $\theta=0$ and $k=h/10$, the program computed the following solution.

      Animation of the 1D stationary breather.

      Though we do not show how to do this in the program, another way to visualize the (1+1)-d solution is to use output generated by the DataOutStack class; it allows us to "stack" the solutions of individual time steps, so that we get 2D space-time graphs from 1D time-dependent solutions. This produces the space-time plot below instead of the animation above.

      A space-time plot of the 1D stationary breather.

@@ -874,11 +874,11 @@

      \[
   \xi = x \cos\vartheta + \sin(\vartheta) (y\cosh\lambda + t\sinh \lambda)
\]

      where $a_0$, $\vartheta$ and $\lambda$ are constants. In the simulation below we have chosen $a_0=\lambda=1$. Notice that if $\vartheta=\pi$ the kink is stationary, hence it would make a good solution against which we can validate the program in 2D because no reflections off the boundary of the domain occur.

      The simulation shown below was performed with $u_0(x) = u_{\mathrm{kink}}(x,t_0)$, $\theta=\frac{1}{2}$, $k=20h$, $t_0=1$ and $t_f=500$. The $L^2$ norm of the error of the finite element solution at each time step remained on the order of $10^{-2}$, showing that the program is working correctly in 2D as well as in 1D. Unfortunately, the solution is not very interesting; nonetheless, we have included a snapshot of it below for completeness.
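      As an aside, a minimal sketch of how such an $L^2$ error against the ExactSolution class can be evaluated in deal.II (the ExactSolution constructor arguments shown here are an assumption for illustration):

        Vector<double> difference_per_cell(triangulation.n_active_cells());
        VectorTools::integrate_difference(dof_handler,
                                          solution,
                                          ExactSolution<dim>(1, time),
                                          difference_per_cell,
                                          QGauss<dim>(fe.degree + 1),
                                          VectorTools::L2_norm);
        const double L2_error =
          VectorTools::compute_global_error(triangulation,
                                            difference_per_cell,
                                            VectorTools::L2_norm);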

      Stationary 2D kink.

      Now that we have validated the code in 1D and 2D, we move to a problem where the analytical solution is unknown.

      To this end, we rotate the kink solution discussed above about the $z$ axis: we let $\vartheta=\frac{\pi}{4}$. The latter results in a solitary wave that is not aligned with the grid, so reflections occur at the boundaries of the domain immediately. For the simulation shown below, we have taken $u_0(x)=u_{\mathrm{kink}}(x,t_0)$, $\theta=\frac{2}{3}$, $k=20h$, $t_0=0$ and $t_f=20$. Moreover, we had to pick $\theta=\frac{2}{3}$ because for any $\theta\le\frac{1}{2}$ oscillations arose at the boundary, which are likely due to the scheme and not the equation; picking a value of $\theta$ a good bit into the "exponentially damped" spectrum of the time stepping schemes ensures that these oscillations are not created.

      Animation of a moving 2D kink, at 45 degrees to the axes of the grid, showing boundary effects.

      Another interesting solution to the sine-Gordon equation (which cannot be obtained analytically) can be produced by using two 1D breathers to construct the following separable 2D initial condition:

      \[
/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html	2024-01-30 03:04:51.624874517 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html	2024-01-30 03:04:51.624874517 +0000
@@ -149,7 +149,7 @@
 \end{align*}

      In some sense, this equation is simpler than the ones we have discussed in the preceding programs step-23, step-24, step-25, namely the wave equation. This is due to the fact that the heat equation smoothes out the solution over time, and is consequently more forgiving in many regards. For example, when using implicit time stepping methods, we can actually take large time steps, we have less trouble with the small disturbances we introduce through adapting the mesh every few time steps, etc.

      Our goal here will be to solve the equations above using the theta-scheme that discretizes the equation in time using the following approach, where we would like $u^n(\mathbf x)$ to approximate $u(\mathbf x, t_n)$ at some time $t_n$:

      \begin{align*}
   \frac{u^n(\mathbf x)-u^{n-1}(\mathbf x)}{k_n}
   -
@@ -166,8 +166,8 @@
   \right].
 \end{align*}

      Here, $k_n=t_n-t_{n-1}$ is the time step size. The theta-scheme generalizes the explicit Euler ( $\theta=0$), implicit Euler ( $\theta=1$) and Crank-Nicolson ( $\theta=\frac 12$) time discretizations. Since the latter has the highest convergence order, we will choose $\theta=\frac 12$ in the program below, but make it so that playing with this parameter remains simple. (If you are interested in playing with higher order methods, take a look at step-52.)

      Given this time discretization, space discretization happens as it always does, by multiplying with test functions, integrating by parts, and then restricting everything to a finite dimensional subspace. This yields the following set of fully discrete equations after multiplying through with $k_n$:

      \begin{align*}
   M U^n-MU^{n-1}
   +
@@ -185,7 +185,7 @@
   \right],
 \end{align*}

      where $M$ is the mass matrix and $A$ is the stiffness matrix that results from discretizing the Laplacian. Bringing all known quantities to the right hand side yields the linear system we have to solve in every step:

      \begin{align*}
   (M
   +
@@ -211,7 +211,7 @@
 • Time step size and minimal mesh size: For stationary problems, the general approach is "make the mesh as fine as it is necessary". For problems with singularities, this often leads to situations where we get many levels of refinement into corners or along interfaces. The very first tutorial to use adaptive meshes, step-6, is a case in point already.

      However, for time dependent problems, we typically need to choose the time step related to the mesh size. For explicit time discretizations, this is obvious, since we need to respect a CFL condition that ties the time step size to the smallest mesh size. For implicit time discretizations, no such hard restriction exists, but in practice we still want to make the time step smaller if we make the mesh size smaller since we typically have error estimates of the form $\|e\| \le {\cal O}(k^p + h^q)$ where $p,q$ are the convergence orders of the time and space discretization, respectively. We can only make the error small if we decrease both terms. Ideally, an estimate like this would suggest to choose $k \propto h^{q/p}$. Because, at least for problems with non-smooth solutions, the error is typically localized in the cells with the smallest mesh size, we have to indeed choose $k \propto h_{\text{min}}^{q/p}$, using the smallest mesh size.

      The consequence is that refining the mesh further in one place implies not only the moderate additional effort of increasing the number of degrees of freedom slightly, but also the much larger effort of having to solve the global linear system more often because of the smaller time step.

      In practice, one typically deals with this by acknowledging that we can not make the time step arbitrarily small, and consequently can not make the local mesh size arbitrarily small. Rather, we set a maximal level of refinement and when we flag cells for refinement, we simply do not refine those cells whose children would exceed this maximal level of refinement.
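      In deal.II code, this is commonly implemented by clearing the refinement flags on all cells that already live on the maximal level before executing the refinement; a minimal sketch (max_grid_level is a hypothetical parameter):

        if (triangulation.n_levels() > max_grid_level)
          for (const auto &cell :
               triangulation.active_cell_iterators_on_level(max_grid_level))
            cell->clear_refine_flag();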

      There is a similar problem in that we will choose a right hand side that will switch on in different parts of the domain at different times. To avoid being caught flat footed with too coarse a mesh in areas where we suddenly need a finer mesh, we will also enforce in our program a minimal mesh refinement level.

@@ -242,7 +242,7 @@

      \begin{align*}
   \sum_j U^n_j \varphi_j(\mathbf x),
\end{align*}

      multiply with test functions $\varphi_i(\mathbf x)$ and integrate by parts where necessary. In a process as outlined above, this would yield

      \begin{align*}
     \sum_j
     (M
@@ -262,7 +262,7 @@
     \right].
   \end{align*}

      Now imagine that we have changed the mesh between time steps $n-1$ and $n$. Then the problem is that the basis functions we use for $u_h^n$ and $u^{n-1}$ are different! This pertains to the terms on the right hand side, the first of which we could more clearly write as (the second follows the same pattern)

      \begin{align*}
     (\varphi_i, u_h^{n-1})
     =
@@ -274,7 +274,7 @@
     i=1\ldots N_n.
   \end{align*}

      If the meshes used in these two time steps are the same, then $(\varphi_i^n, \varphi_j^{n-1})$ forms a square mass matrix $M_{ij}$. However, if the meshes are not the same, then in general the matrix is rectangular. Worse, it is difficult to even compute these integrals because if we loop over the cells of the mesh at time step $n$, then we need to evaluate $\varphi_j^{n-1}$ at the quadrature points of these cells, but they do not necessarily correspond to the cells of the mesh at time step $n-1$ and $\varphi_j^{n-1}$ is not defined via these cells; the same of course applies if we wanted to compute the integrals via integration on the cells of mesh $n-1$.

      In any case, what we have to face is a situation where we need to integrate shape functions defined on two different meshes. This can be done, and is in fact demonstrated in step-28, but the process is at best described by the word "awkward".

      In practice, one does not typically want to do this. Rather, we avoid the whole situation by interpolating the solution from the old to the new mesh every time we adapt the mesh. In other words, rather than solving the equations above, we instead solve the problem

      \begin{align*}
@@ -296,14 +296,14 @@
     \right],
   \end{align*}

      where $I_h^n$ is the interpolation operator onto the finite element space used in time step $n$. This is not the optimal approach since it introduces an additional error besides time and space discretization, but it is a pragmatic one that makes it feasible to adapt the mesh over time.
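      In deal.II, this interpolation from the old to the new mesh is typically done with the SolutionTransfer class; a minimal sketch (assuming the usual triangulation, dof_handler, fe, and solution members):

        SolutionTransfer<dim> solution_trans(dof_handler);
        Vector<double>        previous_solution = solution;

        triangulation.prepare_coarsening_and_refinement();
        solution_trans.prepare_for_coarsening_and_refinement(previous_solution);
        triangulation.execute_coarsening_and_refinement();

        // Rebuild the DoF structures on the new mesh, then apply I_h^n:
        dof_handler.distribute_dofs(fe);
        solution.reinit(dof_handler.n_dofs());
        solution_trans.interpolate(previous_solution, solution);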

    7. What could possibly go wrong? Verifying whether the code is correct

      There are a number of things one can typically get wrong when implementing a finite element code. In particular, for time dependent problems, the following are common sources of bugs:

      A less common problem is getting the initial conditions wrong because one can typically see that it is wrong by just outputting the first time step. In any case, in order to verify the correctness of the code, it is helpful to have a testing protocol that allows us to verify each of these components separately. This means:

      @@ -314,7 +314,7 @@

      deal.II defines a number of integral types via aliases in namespace types. (In the previous sentence, the word "integral" is used as the adjective that corresponds to the noun "integer". It shouldn't be confused with the noun "integral" that represents the area or volume under a curve or surface. The adjective "integral" is widely used in the C++ world in contexts such as "integral type", "integral constant", etc.) In particular, in this program you will see types::global_dof_index in a couple of places: an integer type that is used to denote the global index of a degree of freedom, i.e., the index of a particular degree of freedom within the DoFHandler object that is defined on top of a triangulation (as opposed to the index of a particular degree of freedom within a particular cell). For the current program (as well as almost all of the tutorial programs), you will have a few thousand to maybe a few million unknowns globally (and, for $Q_1$ elements, you will have 4 locally on each cell in 2d and 8 in 3d). Consequently, a data type that allows storing sufficiently large numbers for global DoF indices is unsigned int given that it allows storing numbers between 0 and slightly more than 4 billion (on most systems, where integers are 32-bit). In fact, this is what types::global_dof_index is.

      So, why not just use unsigned int right away? deal.II used to do this until version 7.3. However, deal.II supports very large computations (via the framework discussed in step-40) that may have more than 4 billion unknowns when spread across a few thousand processors. Consequently, there are situations where unsigned int is not sufficiently large and we need a 64-bit unsigned integral type. To make this possible, we introduced types::global_dof_index which by default is defined as simply unsigned int whereas it is possible to define it as unsigned long long int if necessary, by passing a particular flag during configuration (see the ReadMe file).

      This covers the technical aspect. But there is also a documentation purpose: everywhere in the library and codes that are built on it, if you see a place using the data type types::global_dof_index, you immediately know that the quantity that is being referenced is, in fact, a global dof index. No such meaning would be apparent if we had just used unsigned int (which may also be a local index, a boundary indicator, a material id, etc.). Immediately knowing what a variable refers to also helps avoid errors: it's quite clear that there must be a bug if you see an object of type types::global_dof_index being assigned to a variable of type types::subdomain_id, even though they are both represented by unsigned integers and the compiler will, consequently, not complain.

      In more practical terms what the presence of this type means is that during assembly, we create a $4\times 4$ matrix (in 2d, using a $Q_1$ element) of the contributions of the cell we are currently sitting on, and then we need to add the elements of this matrix to the appropriate elements of the global (system) matrix. For this, we need to get at the global indices of the degrees of freedom that are local to the current cell, for which we will always use the following piece of the code:

      cell->get_dof_indices (local_dof_indices);

      where local_dof_indices is declared as

      std::vector<types::global_dof_index> local_dof_indices (fe.n_dofs_per_cell());
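      Putting the pieces together, the local-to-global transfer described above then follows the usual pattern; a minimal sketch (cell_matrix and system_matrix are the customary names, assumed to exist here):

        std::vector<types::global_dof_index> local_dof_indices(
          fe.n_dofs_per_cell());
        for (const auto &cell : dof_handler.active_cell_iterators())
          {
            // ... integrate the 4x4 (in 2d) cell_matrix on this cell ...
            cell->get_dof_indices(local_dof_indices);
            for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
              for (unsigned int j = 0; j < fe.n_dofs_per_cell(); ++j)
                system_matrix.add(local_dof_indices[i],
                                  local_dof_indices[j],
                                  cell_matrix(i, j));
          }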

      The name of this variable might be a bit of a misnomer – it stands for "the global indices of those degrees of freedom locally defined on the current
/usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html	2024-01-30 03:04:51.944877183 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html	2024-01-30 03:04:51.944877183 +0000
@@ -218,20 +218,20 @@

      Motivation

      Adaptive local refinement is used to obtain fine meshes which are well adapted to solving the problem at hand efficiently. In short, the size of cells which produce a large error is reduced to obtain a better approximation of the solution to the problem at hand. However, a lot of problems contain anisotropic features. Prominent examples are shocks or boundary layers in compressible viscous flows. An efficient mesh approximates these features with cells of higher aspect ratio which are oriented according to the mentioned features. Using only isotropic refinement, the aspect ratios of the original mesh cells are preserved, as they are inherited by the children of a cell. Thus, starting from an isotropic mesh, a boundary layer will be refined in order to catch the rapid variation of the flow field in the wall normal direction, thus leading to cells with very small edge lengths both in normal and tangential direction. Usually, much higher edge lengths in tangential direction and thus significantly less cells could be used without a significant loss in approximation accuracy. An anisotropic refinement process can modify the aspect ratio from mother to child cells by a factor of two for each refinement step. In the course of several refinements, the aspect ratio of the fine cells can be optimized, saving a considerable number of cells and correspondingly degrees of freedom and thus computational resources, memory as well as CPU time.

      Implementation

      Most of the time, when we do finite element computations, we only consider one cell at a time, for example to calculate cell contributions to the global matrix, or to interpolate boundary values. However, sometimes we have to look at how cells are related in our algorithms. Relationships between cells come in two forms: neighborship and mother-child relationship. For the case of isotropic refinement, deal.II uses certain conventions (invariants) for cell relationships that are always maintained. For example, a refined cell always has exactly $2^{dim}$ children. And (except for the 1d case), two neighboring cells may differ by at most one refinement level: they are equally often refined or one of them is exactly once more refined, leaving exactly one hanging node on the common face. Almost all of the time these invariants are only of concern in the internal implementation of the library. However, there are cases where knowledge of them is also relevant to an application program.

      In the current context, it is worth noting that the kind of mesh refinement affects some of the most fundamental assumptions. Consequently, some of the usual code found in application programs will need modifications to exploit the features of meshes which were created using anisotropic refinement. For those interested in how deal.II evolved, it may be of interest that the loosening of such invariants required some incompatible changes. For example, the library used to have a member GeometryInfo<dim>::children_per_cell that specified how many children a cell has once it is refined. For isotropic refinement, this number is equal to $2^{dim}$, as mentioned above. However, for anisotropic refinement, this number does not exist, as it can be either two or four in 2D and two, four or eight in 3D, and the member GeometryInfo<dim>::children_per_cell has consequently been removed. It has now been replaced by GeometryInfo<dim>::max_children_per_cell which specifies the maximum number of children a cell can have. How many children a refined cell has was previously available as static information, but now it depends on the actual refinement state of a cell and can be retrieved using TriaAccessor::n_children(), a call that works equally well for both isotropic and anisotropic refinement. A very similar situation can be found for faces and their subfaces: the pertinent information can be queried using GeometryInfo<dim>::max_children_per_face or face->n_children(), depending on the context.

      Another important aspect, and the most important one in this tutorial, is the treatment of neighbor-relations when assembling jump terms on the faces between cells. Looking at the documentation of the assemble_system functions in step-12 we notice that we need to decide if a neighboring cell is coarser, finer or on the same (refinement) level as our current cell. These decisions do not work in the same way for anisotropic refinement as the information given by the level of a cell is not enough to completely characterize anisotropic cells; for example, are the terminal children of a two-dimensional cell that is first cut in $x$-direction and whose children are then cut in $y$-direction on level 2, or are they on level 1 as they would be if the cell would have been refined once isotropically, resulting in the same set of finest cells?

      After anisotropic refinement, a coarser neighbor is not necessarily exactly one level below ours, but can pretty much have any level relative to the current one; in fact, it can even be on a higher level even though it is coarser. Thus the decisions have to be made on a different basis, whereas the intention of the decisions stays the same.

      In the following, we will discuss the cases that can happen when we want to compute contributions to the matrix (or right hand side) of the form

\[
   \int_{\partial K} \varphi_i(x) \varphi_j(x) \; dx
\]

      or similar; remember that we integrate terms like this using the FEFaceValues and FESubfaceValues classes. We will also show how to write code that works for both isotropic and anisotropic refinement:

      Here, the left two cells resulted from an anisotropic bisection of the mother cell in $y$-direction, whereas the right four cells resulted from a simultaneous anisotropic refinement in both the $y$- and $z$-directions. The left cell marked with # has two finer neighbors marked with +, but the actual neighbor of the left cell is the complete right mother cell, as the two cells marked with + are finer and their direct mother is the one large cell.

      However, fortunately, CellAccessor::neighbor_child_on_subface() takes care of these situations by itself, if you loop over the correct number of subfaces, which in the above example is two. The FESubfaceValues<dim>::reinit function takes care of this too, so that the resulting state is always correct. There is one little caveat, however: For reinitializing the neighbor's FEFaceValues object you need to know the index of the face that points toward the current cell. Usually you assume that the neighbor you get directly is as coarse or as fine as you, if it has children, thus this information can be obtained with CellAccessor::neighbor_of_neighbor(). If the neighbor is coarser, however, you would have to use the first value in CellAccessor::neighbor_of_coarser_neighbor() instead. In order to make this easy for you, there is CellAccessor::neighbor_face_no() which does the correct thing for you and returns the desired result.
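      A minimal sketch of how these calls fit together when the neighbor is refined (assembly and the FEFaceValues/FESubfaceValues handling omitted; cell is an active cell iterator and face_no the current face number):

        const auto face = cell->face(face_no);
        if (!cell->at_boundary(face_no) && face->has_children())
          {
            // The face number of the neighbor's face that points back at us:
            const unsigned int neighbor_face_no =
              cell->neighbor_face_no(face_no);

            // Loop over the actual number of subfaces, which for anisotropic
            // refinement may differ from the isotropic default:
            for (unsigned int subface_no = 0; subface_no < face->n_children();
                 ++subface_no)
              {
                const auto neighbor_child =
                  cell->neighbor_child_on_subface(face_no, subface_no);
                // ... reinit the face values objects and assemble ...
              }
          }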

      @@ -294,15 +294,15 @@

   This approach is similar to the one we have used in step-27 for hp-refinement and has the great advantage of flexibility: Any error indicator can be used in the anisotropic process, i.e. if you have quite involved a posteriori goal-oriented error indicators available you can use them as easily as a simple Kelly error estimator. The anisotropic part of the refinement process is not influenced by this choice. Furthermore, simply leaving out the third and fourth steps leads to the same isotropic refinement you used to get before any anisotropic changes in deal.II or your application program. As a last advantage, working only on cells flagged for refinement results in a faster evaluation of the anisotropic indicator, which can become noticeable on finer meshes with a lot of cells if the indicator is quite involved.

    Here, we use a very simple approach which is only applicable to DG methods. The general idea is quite simple: DG methods allow the discrete solution to jump over the faces of a cell, whereas it is smooth within each cell. Of course, in the limit we expect that the jumps tend to zero as we refine the mesh and approximate the true solution better and better. Thus, a large jump across a given face indicates that the cell should be refined (at least) orthogonally to that face, whereas a small jump does not lead to this conclusion. It is possible, of course, that the exact solution is not smooth and that it also features a jump. In that case, however, a large jump over one face indicates, that this face is more or less parallel to the jump and in the vicinity of it, thus again we would expect a refinement orthogonal to the face under consideration to be effective.

   The proposed indicator calculates the average jump $K_j$, i.e. the mean value of the absolute jump $|[u]|$ of the discrete solution $u$ over the two faces $f_i^j$, $i=1,2$, $j=1..d$ orthogonal to coordinate direction $j$ on the unit cell.

   \[
 K_j = \frac{\sum_{i=1}^2 \int_{f_i^j}|[u]| dx}{\sum_{i=1}^2 |f_i^j|} .
\]

    If the average jump in one direction is larger than the average of the jumps in the other directions by a certain factor $\kappa$, i.e. if $K_i > \kappa \frac 1{d-1} \sum_{j=1, j\neq i}^d K_j$, the cell is refined only along that particular direction $i$, otherwise the cell is refined isotropically.

    Such a criterion is easily generalized to systems of equations: the absolute value of the jump would be replaced by an appropriate norm of the vector-valued jump.
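   A minimal sketch of how this criterion turns into refinement flags (jump is a hypothetical array holding the per-direction averages $K_j$ for the current cell, kappa the threshold factor, and dim > 1):

     double sum_of_average_jumps = 0.;
     for (unsigned int i = 0; i < dim; ++i)
       sum_of_average_jumps += jump[i];

     cell->set_refine_flag(); // default: isotropic refinement
     for (unsigned int i = 0; i < dim; ++i)
       if (jump[i] > kappa / (dim - 1) * (sum_of_average_jumps - jump[i]))
         // large jump orthogonal to direction i: cut only along this axis
         cell->set_refine_flag(RefinementCase<dim>::cut_axis(i));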

    The problem

    We solve the linear transport equation presented in step-12. The domain is extended to cover $[-1,1]\times[0,1]$ in 2D, where the flow field $\beta$ describes a counterclockwise quarter circle around the origin in the right half of the domain and is parallel to the x-axis in the left part of the domain. The inflow boundary is again located at $x=1$ and along the positive part of the x-axis, and the boundary conditions are chosen as in step-12.

    The commented program

    The deal.II include files have already been covered in previous examples and will thus not be further commented on.

  #include <deal.II/base/quadrature_lib.h>
    @@ -382,7 +382,7 @@
   The flow field is chosen to be a quarter circle with counterclockwise flow direction and with the origin as midpoint for the right half of the domain with positive $x$ values, whereas the flow simply goes to the left in the left part of the domain at a velocity that matches the one coming in from the right. In the circular part the magnitude of the flow velocity is proportional to the distance from the origin. This is a difference from step-12, where the magnitude was 1 everywhere. The new definition leads to a linear variation of $\beta$ along each given face of a cell. On the other hand, the solution $u(x,y)$ is exactly the same as before.

      void value_list(const std::vector<Point<dim>> &points,
      std::vector<Point<dim>> & values) const
      {
    @@ -1336,7 +1336,7 @@

   We see that the solution on the anisotropically refined mesh is very similar to the solution obtained on the isotropically refined mesh. Thus the anisotropic indicator seems to effectively select the appropriate cells for anisotropic refinement.

   The pictures also explain why the mesh is refined as it is. In the whole left part of the domain refinement is only performed along the $y$-axis of cells. In the right part of the domain the refinement is dominated by isotropic refinement, as the anisotropic feature of the solution - the jump from one to zero - is not well aligned with the mesh where the advection direction takes a turn. However, at the bottom and closest (to the observer) parts of the quarter circle this jump again becomes more and more aligned with the mesh and the refinement algorithm reacts by creating anisotropic cells of increasing aspect ratio.

   It might seem that the necessary alignment of anisotropic features and the coarse mesh can decrease performance significantly for real world problems. That is not wrong in general: If one were, for example, to apply anisotropic refinement to problems in which shocks appear (e.g., the equations solved in step-69), then in many cases the shock is not aligned with the mesh and anisotropic refinement will help little unless one also introduces techniques to move the mesh in alignment with the shocks. On the other hand, many steep features of solutions are due to boundary layers. In those cases, the mesh is already aligned with the anisotropic features because it is of course aligned with the boundary itself, and anisotropic refinement will almost always increase the efficiency of computations on adapted grids for these cases.

    The plain program

    /* ---------------------------------------------------------------------
/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html	2024-01-30 03:04:52.060878150 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html	2024-01-30 03:04:52.060878150 +0000
@@ -174,7 +174,7 @@

    The Boussinesq equations

    This program deals with an interesting physical problem: how does a fluid (i.e., a liquid or gas) behave if it experiences differences in buoyancy caused by temperature differences? It is clear that those parts of the fluid that are hotter (and therefore lighter) are going to rise up and those that are cooler (and denser) are going to sink down with gravity.

    In cases where the fluid moves slowly enough such that inertial effects can be neglected, the equations that describe such behavior are the Boussinesq equations that read as follows:

\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   -\rho\; \beta \; T\; \mathbf{g},
   \\
@@ -185,49 +185,49 @@
   {\mathbf u} \cdot \nabla T
   -
   \nabla \cdot \kappa \nabla T &=& \gamma.
\end{eqnarray*}

   These equations fall into the class of vector-valued problems (a toplevel overview of this topic can be found in the Handling vector valued problems module). Here, $\mathbf u$ is the velocity field, $p$ the pressure, and $T$ the temperature of the fluid. $\varepsilon ({\mathbf u}) = \frac 12 [(\nabla{\mathbf u}) + (\nabla {\mathbf u})^T]$ is the symmetric gradient of the velocity. As can be seen, velocity and pressure solve a Stokes equation describing the motion of an incompressible fluid, an equation we have previously considered in step-22; we will draw extensively on the experience we have gained in that program, in particular with regard to efficient linear Stokes solvers.

    The forcing term of the fluid motion is the buoyancy of the fluid, expressed as the product of the density $\rho$, the thermal expansion coefficient $\beta$, the temperature $T$ and the gravity vector $\mathbf{g}$ pointing downward. (A derivation of why the right hand side looks like it looks is given in the introduction of step-32.) While the first two equations describe how the fluid reacts to temperature differences by moving around, the third equation states how the fluid motion affects the temperature field: it is an advection diffusion equation, i.e., the temperature is attached to the fluid particles and advected along in the flow field, with an additional diffusion (heat conduction) term. In many applications, the diffusion coefficient is fairly small, and the temperature equation is in fact transport, not diffusion dominated and therefore in character more hyperbolic than elliptic; we will have to take this into account when developing a stable discretization.

    In the equations above, the term $\gamma$ on the right hand side denotes the heat sources and may be a spatially and temporally varying function. $\eta$ and $\kappa$ denote the viscosity and diffusivity coefficients, which we assume constant for this tutorial program. The more general case when $\eta$ depends on the temperature is an important factor in physical applications: Most materials become more fluid as they get hotter (i.e., $\eta$ decreases with $T$); sometimes, as in the case of rock minerals at temperatures close to their melting point, $\eta$ may change by orders of magnitude over the typical range of temperatures.

   We note that the Stokes equation above could be nondimensionalized by introducing the Rayleigh number $\mathrm{Ra}=\frac{\|\mathbf{g}\| \beta \rho}{\eta \kappa} \delta T L^3$ using a typical length scale $L$, typical temperature difference $\delta T$, density $\rho$, viscosity $\eta$, and thermal diffusivity $\kappa$. $\mathrm{Ra}$ is a dimensionless number that describes the ratio of heat transport due to convection induced by buoyancy changes from temperature differences, and of heat transport due to thermal diffusion. A small Rayleigh number implies that buoyancy is not strong relative to viscosity and fluid motion $\mathbf{u}$ is slow enough so that heat diffusion $\kappa\nabla T$ is the dominant heat transport term. On the other hand, a fluid with a high Rayleigh number will show vigorous convection that dominates heat conduction.

   For most fluids for which we are interested in computing thermal convection, the Rayleigh number is very large, often $10^6$ or larger. From the structure of the equations, we see that this will lead to large pressure differences and large velocities. Consequently, the convection term in the convection-diffusion equation for $T$ will also be very large and an accurate solution of this equation will require us to choose small time steps. Problems with large Rayleigh numbers are therefore hard to solve numerically for similar reasons that make the Navier-Stokes equations hard to solve when the Reynolds number $\mathrm{Re}$ is large.

    Note that a large Rayleigh number does not necessarily involve large velocities in absolute terms. For example, the Rayleigh number in the earth mantle is larger than $10^6$. Yet the velocities are small: the material is in fact solid rock but it is so hot and under pressure that it can flow very slowly, on the order of at most a few centimeters per year. Nevertheless, this can lead to mixing over time scales of many million years, a time scale much shorter than for the same amount of heat to be distributed by thermal conductivity and a time scale of relevance to affect the evolution of the earth's interior and surface structure.


    +

    Note that a large Rayleigh number does not necessarily involve large velocities in absolute terms. For example, the Rayleigh number in the earth mantle is larger than $10^6$. Yet the velocities are small: the material is in fact solid rock but it is so hot and under pressure that it can flow very slowly, on the order of at most a few centimeters per year. Nevertheless, this can lead to mixing over time scales of many million years, a time scale much shorter than for the same amount of heat to be distributed by thermal conductivity and a time scale of relevance to affect the evolution of the earth's interior and surface structure.
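
    To make the definition of $\mathrm{Ra}$ above concrete, here is a minimal standalone sketch that evaluates it; the variable names and the Earth-mantle-like values are illustrative assumptions, not values used by the tutorial program:

  #include <cmath>
  #include <iostream>

  int main()
  {
    // Illustrative, Earth-mantle-like orders of magnitude (assumptions):
    const double g       = 9.81;  // gravity magnitude [m/s^2]
    const double beta    = 2e-5;  // thermal expansion coefficient [1/K]
    const double rho     = 3300;  // density [kg/m^3]
    const double eta     = 1e21;  // viscosity [Pa s]
    const double kappa   = 1e-6;  // thermal diffusivity [m^2/s]
    const double delta_T = 3000;  // typical temperature difference [K]
    const double L       = 3e6;   // typical length scale [m]

    // Ra = |g| beta rho / (eta kappa) * delta_T * L^3
    const double Ra = g * beta * rho / (eta * kappa) * delta_T * std::pow(L, 3);

    std::cout << "Ra = " << Ra << std::endl; // ~5e7: convection dominated
    return 0;
  }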

    Note
    If you are interested in using the program as the basis for your own experiments, you will also want to take a look at its continuation in step-32. Furthermore, step-32 was later developed into the much larger open source code ASPECT (see https://aspect.geodynamics.org/ ) that can solve realistic problems and that you may want to investigate before trying to morph step-31 into something that can solve whatever problem you want to solve.

    Boundary and initial conditions

    Since the Boussinesq equations are derived under the assumption that inertia of the fluid's motion does not play a role, the flow field is at each time entirely determined by buoyancy difference at that time, not by the flow field at previous times. This is reflected by the fact that the first two equations above are the steady state Stokes equations that do not contain a time derivative. Consequently, we do not need initial conditions for either velocities or pressure. On the other hand, the temperature field does satisfy an equation with a time derivative, so we need initial conditions for $T$.

    As for boundary conditions: if $\kappa>0$ then the temperature satisfies a second order differential equation that requires boundary data all around the boundary for all times. These can either be a prescribed boundary temperature $T|_{\partial\Omega}=T_b$ (Dirichlet boundary conditions), or a prescribed thermal flux $\mathbf{n}\cdot\kappa\nabla T|_{\partial\Omega}=\phi$; in this program, we will use an insulated boundary condition, i.e., prescribe no thermal flux: $\phi=0$.

    Similarly, the velocity field requires us to pose boundary conditions. These may be no-slip no-flux conditions $\mathbf{u}=0$ on $\partial\Omega$ if the fluid sticks to the boundary, or no normal flux conditions $\mathbf n \cdot \mathbf u = 0$ if the fluid can flow along but not across the boundary, or any number of other conditions that are physically reasonable. In this program, we will use no normal flux conditions.
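
    In deal.II, no-normal-flux conditions on the velocity are typically imposed through an AffineConstraints object via VectorTools::compute_no_normal_flux_constraints. The following is a minimal sketch, assuming a DoFHandler for an element whose first dim components are the velocities; the boundary id 0 is an illustrative assumption:

  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/lac/affine_constraints.h>
  #include <deal.II/numerics/vector_tools.h>

  // Sketch only: constrain n.u = 0 on the boundary parts with id 0
  // (illustrative), leaving tangential flow unconstrained.
  template <int dim>
  void make_no_normal_flux_constraints(const dealii::DoFHandler<dim>     &dof_handler,
                                       dealii::AffineConstraints<double> &constraints)
  {
    const std::set<dealii::types::boundary_id> no_flux_boundaries = {0};
    dealii::VectorTools::compute_no_normal_flux_constraints(
      dof_handler,
      /*first_vector_component=*/0,
      no_flux_boundaries,
      constraints);
    constraints.close();
  }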

    Solution approach

    Like the equations solved in step-21, we here have a system of differential-algebraic equations (DAE): with respect to the time variable, only the temperature equation is a differential equation, whereas the Stokes system for $\mathbf{u}$ and $p$ has no time derivatives and is therefore an algebraic constraint that has to hold at each time instant. The main difference to step-21 is that the algebraic constraint there was a mixed Laplace system of the form

\begin{eqnarray*}
  \mathbf u + {\mathbf K}\lambda \nabla p &=& 0, \\
  \nabla\cdot \mathbf u &=& f,
\end{eqnarray*}

    whereas here we have a Stokes system

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=& f, \\
  \nabla\cdot \mathbf u &=& 0,
\end{eqnarray*}

    where $\nabla \cdot \eta \varepsilon (\cdot)$ is an operator similar to the Laplacian $\Delta$ applied to a vector field.

    Given the similarity to what we have done in step-21, it may not come as a surprise that we choose a similar approach, although we will have to make adjustments for the change in operator in the top-left corner of the differential operator.

    Time stepping

    The structure of the problem as a DAE allows us to use the same strategy as we have already used in step-21, i.e., we use a time lag scheme: we first solve the temperature equation (using an extrapolated velocity field), and then insert the new temperature solution into the right hand side of the velocity equation. The way we implement this in our code looks at things from a slightly different perspective, though. We first solve the Stokes equations for velocity and pressure using the temperature field from the previous time step, which means that we get the velocity for the previous time step. In other words, we first solve the Stokes system for time step $n - 1$ as

\begin{eqnarray*}
  -\nabla \cdot (2\eta \varepsilon ({\mathbf u}^{n-1})) + \nabla p^{n-1} &=&
  -\rho\; \beta \; T^{n-1} \mathbf{g},
  \\
  \nabla \cdot {\mathbf u}^{n-1} &=& 0,
\end{eqnarray*}

    and then the temperature equation with an extrapolated velocity field to time $n$.

    In contrast to step-21, we'll use a higher order time stepping scheme here, namely the Backward Differentiation Formula scheme of order 2 (BDF-2 in short) that replaces the time derivative $\frac{\partial T}{\partial t}$ by the (one-sided) difference quotient $\frac{\frac 32 T^{n}-2T^{n-1}+\frac 12 T^{n-2}}{k}$ with $k$ the time step size. This gives the discretized-in-time temperature equation

\begin{eqnarray*}
  \frac 32 T^n
  -
  k\nabla \cdot \kappa \nabla T^n
  &=&
  2T^{n-1}
  -
  \frac 12 T^{n-2}
  -
  k(2{\mathbf u}^{n-1} - {\mathbf u}^{n-2} ) \cdot \nabla (2T^{n-1}-T^{n-2})
  +
  k\gamma.
\end{eqnarray*}

    Note how the temperature equation is solved semi-explicitly: diffusion is treated implicitly whereas advection is treated explicitly using an extrapolation (or forward-projection) of temperature and velocity, including the just-computed velocity ${\mathbf u}^{n-1}$. The forward-projection to the current time level $n$ is derived from a Taylor expansion, $T^n \approx T^{n-1} + k_n \frac{\partial T}{\partial t} \approx T^{n-1} + k_n \frac{T^{n-1}-T^{n-2}}{k_n} = 2T^{n-1}-T^{n-2}$. We need this projection for maintaining the order of accuracy of the BDF-2 scheme. In other words, the temperature fields we use in the explicit right hand side are second order approximations of the current temperature field; not quite an explicit time stepping scheme, but by character not too far away either.

    The introduction of the temperature extrapolation limits the time step by a Courant-Friedrichs-Lewy (CFL) condition just like it was in step-21. (We wouldn't have had that stability condition if we treated the advection term implicitly since the BDF-2 scheme is A-stable, at the price that we needed to build a new temperature matrix at each time step.) We will discuss the exact choice of time step in the results section, but what is important for the moment is that this CFL condition means that the time step size $k$ may change from time step to time step, and that we have to modify the above formula slightly. If $k_n,k_{n-1}$ are the time step sizes of the current and previous time step, then we use the approximations

\begin{align*}
 \frac{\partial T}{\partial t} \approx
  \frac 1{k_n}
  \left(
        \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} T^{n}
        -
        \frac{k_n+k_{n-1}}{k_{n-1}} T^{n-1}
        +
        \frac{k_n^2}{k_{n-1}(k_n+k_{n-1})} T^{n-2}
  \right)
\end{align*}

    and

\begin{align*}
 T^n \approx
    T^{n-1} + k_n \frac{\partial T}{\partial t}
    \approx
    T^{n-1} + k_n
    \frac{T^{n-1}-T^{n-2}}{k_{n-1}}
    =
    \left(1+\frac{k_n}{k_{n-1}}\right)T^{n-1}-\frac{k_n}{k_{n-1}}T^{n-2},
\end{align*}

    and the above equation is generalized as follows:

\begin{eqnarray*}
  \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} T^n
  -
  k_n\nabla \cdot \kappa \nabla T^n
  &=&
  \frac{k_n+k_{n-1}}{k_{n-1}} T^{n-1}
  -
  \frac{k_n^2}{k_{n-1}(k_n+k_{n-1})} T^{n-2}
  -
  k_n{\mathbf u}^{*,n} \cdot \nabla T^{*,n}
  +
  k_n\gamma,
\end{eqnarray*}

    where ${(\cdot)}^{*,n} = \left(1+\frac{k_n}{k_{n-1}}\right)(\cdot)^{n-1} - \frac{k_n}{k_{n-1}}(\cdot)^{n-2}$ denotes the extrapolation of velocity $\mathbf u$ and temperature $T$ to time level $n$, using the values at the two previous time steps. That is not an easy equation to read, but it will provide us with the desired higher order accuracy. As a consistency check, it is easy to verify that it reduces to the same equation as above if $k_n=k_{n-1}$.

    As a final remark we note that the choice of a higher order time stepping scheme of course forces us to keep more time steps in memory; in particular, we here will need to have $T^{n-2}$ around, a vector that we could previously discard. This seems like a nuisance that we were able to avoid previously by using only a first order time stepping scheme, but as we will see below when discussing the topic of stabilization, we will need this vector anyway and so keeping it around for time discretization is essentially for free and gives us the opportunity to use a higher order scheme.
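
    As a cross-check of these formulas, here is a small standalone sketch (illustrative, not taken from the tutorial's source) that evaluates the variable-step BDF-2 weights and the extrapolation weights, and verifies that they reduce to $\frac 32, 2, \frac 12$ and $2, -1$ for equal step sizes:

  #include <cassert>
  #include <cmath>
  #include <iostream>

  int main()
  {
    const double kn = 0.01, knm1 = 0.01; // current and previous step size

    // Weights of T^n, T^{n-1}, T^{n-2} in k_n * dT/dt (variable-step BDF-2):
    const double w_n   = (2 * kn + knm1) / (kn + knm1);
    const double w_nm1 = (kn + knm1) / knm1;
    const double w_nm2 = kn * kn / (knm1 * (kn + knm1));

    // Extrapolation (.)^{*,n} = (1 + kn/knm1)(.)^{n-1} - (kn/knm1)(.)^{n-2}:
    const double e_nm1 = 1 + kn / knm1;
    const double e_nm2 = kn / knm1;

    // For kn == knm1 these must reduce to the constant-step formulas:
    assert(std::abs(w_n - 1.5) < 1e-12);
    assert(std::abs(w_nm1 - 2.0) < 1e-12);
    assert(std::abs(w_nm2 - 0.5) < 1e-12);
    assert(std::abs(e_nm1 - 2.0) < 1e-12);
    assert(std::abs(e_nm2 - 1.0) < 1e-12);

    std::cout << w_n << ' ' << w_nm1 << ' ' << w_nm2 << std::endl;
    return 0;
  }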

    Weak form and space discretization for the Stokes part

    Like solving the mixed Laplace equations, solving the Stokes equations requires us to choose particular pairs of finite elements for velocities and pressure variables. Because this has already been discussed in step-22, we only cover this topic briefly: Here, we use the stable pair $Q_{p+1}^d \times Q_p, p\ge 1$. These are continuous elements, so we can form the weak form of the Stokes equation without problem by integrating by parts and substituting continuous functions by their discrete counterparts:

\begin{eqnarray*}
  (\nabla {\mathbf v}_h, 2\eta \varepsilon ({\mathbf u}^{n-1}_h))
  -
  (\nabla \cdot {\mathbf v}_h, p^{n-1}_h)
  &=&
  -({\mathbf v}_h, \rho\; \beta \; T^{n-1}_h \mathbf{g}),
  \\
  (q_h, \nabla \cdot {\mathbf u}^{n-1}_h) &=& 0,
\end{eqnarray*}

    for all test functions $\mathbf v_h, q_h$. The first term of the first equation is considered as the inner product between tensors, i.e., $(\nabla {\mathbf v}_h, \eta \varepsilon ({\mathbf u}^{n-1}_h))_\Omega = \int_\Omega \sum_{i,j=1}^d [\nabla {\mathbf v}_h]_{ij} \eta [\varepsilon ({\mathbf u}^{n-1}_h)]_{ij}\, dx$. Because the second tensor in this product is symmetric, the anti-symmetric component of $\nabla {\mathbf v}_h$ plays no role and it leads to the entirely same form if we use the symmetric gradient of $\mathbf v_h$ instead. Consequently, the formulation we consider and that we implement is

\begin{eqnarray*}
  (\varepsilon({\mathbf v}_h), 2\eta \varepsilon ({\mathbf u}^{n-1}_h))
  -
  (\nabla \cdot {\mathbf v}_h, p^{n-1}_h)
  &=&
  -({\mathbf v}_h, \rho\; \beta \; T^{n-1}_h \mathbf{g}),
  \\
  (q_h, \nabla \cdot {\mathbf u}^{n-1}_h) &=& 0.
\end{eqnarray*}

    This is exactly the same as what we already discussed in step-22 and there is not much more to say about this here.
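
    The construction of the stable $Q_{p+1}^d \times Q_p$ pair in deal.II takes only one line; the following is a minimal sketch, where the dimension and polynomial degree are illustrative choices:

  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_system.h>
  #include <iostream>

  int main()
  {
    using namespace dealii;
    constexpr int      dim = 2; // illustrative
    const unsigned int p   = 1; // illustrative polynomial degree

    // Q_{p+1}^dim x Q_p: 'dim' biquadratic velocity components and one
    // bilinear pressure component, as used for the Stokes part.
    FESystem<dim> stokes_fe(FE_Q<dim>(p + 1), dim,
                            FE_Q<dim>(p), 1);

    std::cout << stokes_fe.get_name() << std::endl;
    // prints: FESystem<2>[FE_Q<2>(2)^2-FE_Q<2>(1)]
    return 0;
  }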

    Stabilization, weak form and space discretization for the temperature equation

    The more interesting question is what to do with the temperature advection-diffusion equation. By default, not all discretizations of this equation are equally stable unless we do something like upwinding, stabilization, or both. One way to achieve this is to use discontinuous elements (i.e., the FE_DGQ class that we used, for example, in the discretization of the transport equation in step-12, or in discretizing the pressure in step-20 and step-21) and to define a flux at the interface between cells that takes into account upwinding. If we had a pure advection problem this would probably be the simplest way to go. However, here we have some diffusion as well, and the discretization of the Laplace operator with discontinuous elements is cumbersome because of the significant number of additional terms that need to be integrated on each face between cells. Discontinuous elements also have the drawback that the use of numerical fluxes introduces an additional numerical diffusion that acts everywhere, whereas we would really like to keep the effect of numerical diffusion to a minimum and only apply it where it is necessary to stabilize the scheme.

    A better alternative is therefore to add some nonlinear viscosity to the model. Essentially, what this does is to transform the temperature equation from the form

\begin{eqnarray*}
  \frac{\partial T}{\partial t}
  +
  {\mathbf u} \cdot \nabla T
  -
  \nabla \cdot \kappa \nabla T &=& \gamma
\end{eqnarray*}

    to something like

\begin{eqnarray*}
  \frac{\partial T}{\partial t}
  +
  {\mathbf u} \cdot \nabla T
  -
  \nabla \cdot (\kappa+\nu(T)) \nabla T &=& \gamma,
\end{eqnarray*}

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    In addition to these changes, we also use a slightly different preconditioner, and we will have to make a number of changes that have to do with the fact that we want to solve a realistic problem here, not a model problem. The latter, in particular, will require that we think about scaling issues as well as what all those parameters and coefficients in the equations under consideration actually mean. We will discuss first the issues that affect changes in the mathematical formulation and solver structure, then how to parallelize things, and finally the actual testcase we will consider.

    Using the "right" pressure

    In step-31, we used the following Stokes model for the velocity and pressure field:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
  -\rho \; \beta \; T \mathbf{g},
  \\
  \nabla \cdot {\mathbf u} &=& 0.
\end{eqnarray*}

    The right hand side of the first equation appears a wee bit unmotivated. Here's how things should really be. We need the external forces that act on the fluid, which we assume are given by gravity only. In the current case, we assume that the fluid does expand slightly for the purposes of this gravity force, but not enough that we need to modify the incompressibility condition (the second equation). What this means is that for the purpose of the right hand side, we can assume that $\rho=\rho(T)$. An assumption that may not be entirely justified is that we can assume that the changes of density as a function of temperature are small, leading to an expression of the form $\rho(T) = \rho_{\text{ref}} [1-\beta(T-T_{\text{ref}})]$, i.e., the density equals $\rho_{\text{ref}}$ at reference temperature and decreases linearly as the temperature increases (as the material expands). The force balance equation then looks properly written like this:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
  \rho_{\text{ref}} [1-\beta(T-T_{\text{ref}})] \mathbf{g}.
\end{eqnarray*}

    Now note that the gravity force results from a gravity potential as $\mathbf g=-\nabla \varphi$, so that we can re-write this as follows:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
  -\rho_{\text{ref}} \; \beta\; T\; \mathbf{g}
  -\rho_{\text{ref}} [1+\beta T_{\text{ref}}] \nabla\varphi.
\end{eqnarray*}

    The second term on the right is time independent, and so we could introduce a new "dynamic" pressure $p_{\text{dyn}}=p+\rho_{\text{ref}} [1+\beta T_{\text{ref}}] \varphi=p_{\text{total}}-p_{\text{static}}$ with which the Stokes equations would read:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p_{\text{dyn}} &=&
  -\rho_{\text{ref}} \; \beta \; T \; \mathbf{g},
  \\
  \nabla \cdot {\mathbf u} &=& 0.
\end{eqnarray*}

    This is exactly the form we used in step-31, and it was appropriate to do so because all changes in the fluid flow are only driven by the dynamic pressure that results from temperature differences. (In other words: Contributions to the right hand side that result from taking the gradient of a scalar field have no effect on the velocity field.)

    On the other hand, we will here use the form of the Stokes equations that considers the total pressure instead:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
  \rho(T)\; \mathbf{g},
  \\
  \nabla \cdot {\mathbf u} &=& 0.
\end{eqnarray*}

    There are several advantages to this:

    Note
    There is, however, a downside to this procedure. In the earth, the dynamic pressure is several orders of magnitude smaller than the total pressure. If we use the equations above and solve all variables to, say, 4 digits of accuracy, then we may be able to get the velocity and the total pressure right, but we will have no accuracy at all if we compute the dynamic pressure by subtracting from the total pressure the static part $p_\text{static}=\rho_{\text{ref}} [1+\beta T_{\text{ref}}] \varphi$. If, for example, the dynamic pressure is six orders of magnitude smaller than the static pressure, then we need to solve the overall pressure to at least seven digits of accuracy to get anything remotely accurate. That said, in practice this turns out not to be a limiting factor.

    The scaling of discretized equations

    Remember that we want to solve the following set of equations:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
  \rho(T) \mathbf{g},
  \\
  \nabla \cdot {\mathbf u} &=& 0,
  \\
  \frac{\partial T}{\partial t}
  +
  {\mathbf u} \cdot \nabla T
  -
  \nabla \cdot \kappa \nabla T &=& \gamma,
\end{eqnarray*}

    augmented by appropriate boundary and initial conditions. As discussed in step-31, we will solve this set of equations by solving for a Stokes problem first in each time step, and then moving the temperature equation forward by one time interval.

    The problem under consideration in this current section is with the Stokes problem: if we discretize it as usual, we get a linear system

\begin{eqnarray*}
  M \; X
  =
  \left(\begin{array}{cc}
    A & B^T \\ B & 0
  \end{array}\right)
  \left(\begin{array}{c}
    U \\ P
  \end{array}\right)
  =
  \left(\begin{array}{c}
    F_U \\ 0
  \end{array}\right)
  =
  F
\end{eqnarray*}

    which in this program we will solve with an FGMRES solver. This solver iterates until the residual of these linear equations is below a certain tolerance, i.e., until

\[
  \left\|
  \left(\begin{array}{c}
    F_U - A U^{(k)} - B P^{(k)}
    \\
    - B^T U^{(k)}
  \end{array}\right)
  \right\|
  < \text{Tol}.
\]

    This does not make any sense from the viewpoint of physical units: the quantities involved here have physical units so that the first part of the residual has units $\frac{\text{Pa}}{\text{m}} \text{m}^{\text{dim}}$ (most easily established by considering the term $(\nabla \cdot \mathbf v, p)_{\Omega}$ and considering that the pressure has units $\text{Pa}=\frac{\text{kg}}{\text{m}\;\text{s}^2}$ and the integration yields a factor of $\text{m}^{\text{dim}}$), whereas the second part of the residual has units $\frac{\text{m}^{\text{dim}}}{\text{s}}$. Taking the norm of this residual vector would yield a quantity with units $\text{m}^{\text{dim}-1} \sqrt{\left(\text{Pa}\right)^2 + \left(\frac{\text{m}}{\text{s}}\right)^2}$. This, quite obviously, does not make sense, and we should not be surprised that doing so is eventually going to come back hurting us.

    So why is this an issue here, but not in step-31? The reason back there is that everything was nicely balanced: velocities were on the order of one, the pressure likewise, the viscosity was one, and the domain had a diameter of $\sqrt{2}$. As a result, while nonsensical, nothing bad happened. On the other hand, as we will explain below, things here will not be that simply scaled: $\eta$ will be around $10^{21}$, velocities on the order of $10^{-8}$, pressure around $10^8$, and the diameter of the domain is $10^7$. In other words, the order of magnitude for the first equation is going to be $\eta\text{div}\varepsilon(\mathbf u) \approx 10^{21} \frac{10^{-8}}{(10^7)^2} \approx 10^{-1}$, whereas the second equation will be around $\text{div}{\mathbf u}\approx \frac{10^{-8}}{10^7} \approx 10^{-15}$. What this will lead to is this: if the solver wants to make the residual small, it will almost entirely focus on the first set of equations because they are so much bigger, and ignore the divergence equation that describes mass conservation. That's exactly what happens: unless we set the tolerance to extremely small values, the resulting flow field is definitely not divergence free. As an auxiliary problem, it turns out that it is difficult to find a tolerance that always works; in practice, one often ends up with a tolerance that requires 30 or 40 iterations for most time steps, and 10,000 for some others.

    So what's a numerical analyst to do in a case like this? The answer is to start at the root and first make sure that everything is mathematically consistent. In our case, this means that if we want to solve the system of Stokes equations jointly, we have to scale them so that they all have the same physical dimensions. In our case, this means multiplying the second equation by something that has units $\frac{\text{Pa}\;\text{s}}{\text{m}}$; one choice is to multiply with $\frac{\eta}{L}$ where $L$ is a typical length scale in our domain (which experiments show is best chosen to be the diameter of plumes, around 10 km, rather than the diameter of the domain). Using these numbers for $\eta$ and $L$, this factor is around $10^{17}$. So, we now get this for the Stokes system:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
  \rho(T) \; \mathbf{g},
  \\
  \frac{\eta}{L} \nabla \cdot {\mathbf u} &=& 0.
\end{eqnarray*}

    The trouble with this is that the result is not symmetric any more (we have $\frac{\eta}{L} \nabla \cdot$ at the bottom left, but not its transpose operator at the top right). This, however, can be cured by introducing a scaled pressure $\hat p = \frac{L}{\eta}p$, and we get the scaled equations

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) +
  \nabla \left(\frac{\eta}{L} \hat p\right) &=&
  \rho(T) \; \mathbf{g},
  \\
  \frac{\eta}{L} \nabla \cdot {\mathbf u} &=& 0.
\end{eqnarray*}

    This is now symmetric. Obviously, we can easily recover the original pressure $p$ from the scaled pressure $\hat p$ that we compute as a result of this procedure.

    In the program below, we will introduce a factor EquationData::pressure_scaling that corresponds to $\frac{\eta}{L}$, and we will use this factor in the assembly of the system matrix and preconditioner. Because it is annoying and error prone, we will recover the unscaled pressure immediately following the solution of the linear system, i.e., the solution vector's pressure component will immediately be unscaled to retrieve the physical pressure. Since the solver uses the fact that we can use a good initial guess by extrapolating the previous solutions, we also have to scale the pressure immediately before solving.
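
    The scale-solve-unscale pattern described here might look as follows; this is a hedged sketch whose types and names (the block layout with the pressure in block 1, the matrix, and the preconditioner) are assumptions for illustration, not the program's actual interfaces:

  #include <deal.II/lac/block_vector.h>
  #include <deal.II/lac/solver_control.h>
  #include <deal.II/lac/solver_gmres.h>

  template <class BlockMatrix, class Preconditioner>
  void solve_stokes_scaled(const BlockMatrix                 &stokes_matrix,
                           dealii::BlockVector<double>       &solution,
                           const dealii::BlockVector<double> &system_rhs,
                           const Preconditioner              &preconditioner,
                           const double pressure_scaling) // = eta/L
  {
    // The extrapolated initial guess stores the physical pressure p;
    // convert it to the scaled pressure p-hat = (L/eta) p before solving:
    solution.block(1) /= pressure_scaling;

    dealii::SolverControl solver_control(1000, 1e-8 * system_rhs.l2_norm());
    dealii::SolverFGMRES<dealii::BlockVector<double>> solver(solver_control);
    solver.solve(stokes_matrix, solution, system_rhs, preconditioner);

    // The solver returns p-hat; immediately recover p = (eta/L) p-hat:
    solution.block(1) *= pressure_scaling;
  }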

    Changes to the Stokes preconditioner and solver

    In this tutorial program, we apply a variant of the preconditioner used in step-31. That preconditioner was built to operate on the system matrix $M$ in block form such that the product matrix

\begin{eqnarray*}
  P^{-1} M
  =
  \left(\begin{array}{cc}
    A^{-1} & 0 \\ S^{-1} B A^{-1} & -S^{-1}
  \end{array}\right)
  \left(\begin{array}{cc}
    A & B^T \\ B & 0
  \end{array}\right)
\end{eqnarray*}

    is of a form that Krylov-based iterative solvers like GMRES can solve in a few iterations. We then replaced the exact inverse of $A$ by the action of an AMG preconditioner $\tilde{A}$ based on a vector Laplace matrix, approximated the Schur complement $S = B A^{-1} B^T$ by a mass matrix $M_p$ on the pressure space and wrote an InverseMatrix class for implementing the action of $M_p^{-1}\approx S^{-1}$ on vectors. In the InverseMatrix class, we used a CG solve with an incomplete Cholesky (IC) preconditioner for performing the inner solves.

    An observation one can make is that we use just the action of a preconditioner for approximating the velocity inverse $A^{-1}$ (and the outer GMRES iteration takes care of the approximate character of the inverse), whereas we use a more or less exact inverse for $M_p^{-1}$, realized by a fully converged CG solve. This appears unbalanced, but there's system to this madness: almost all the effort goes into the upper left block to which we apply the AMG preconditioner, whereas even an exact inversion of the pressure mass matrix costs basically nothing. Consequently, if it helps us reduce the overall number of iterations somewhat, then this effort is well spent.
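
    In code, such a block preconditioner is usually a small class whose vmult applies the two approximate inverses to the blocks of a vector. The following is a simplified sketch of the idea, corresponding to the block-triangular $P^{-1}$ above; the class and member names are illustrative assumptions, not the tutorial's actual implementation:

  #include <deal.II/lac/block_vector.h>
  #include <deal.II/lac/vector.h>

  template <class MatrixB, class PreconditionerA, class PreconditionerMp>
  class BlockSchurPreconditionerSketch
  {
  public:
    BlockSchurPreconditionerSketch(const MatrixB          &B,
                                   const PreconditionerA  &amg,
                                   const PreconditionerMp &mp_inverse)
      : B(B), amg(amg), mp_inverse(mp_inverse)
    {}

    // Apply P^{-1} with P = [A 0; B -S]: first the (approximate) velocity
    // inverse, then the Schur complement part using the divergence of the
    // just-computed velocity update.
    void vmult(dealii::BlockVector<double>       &dst,
               const dealii::BlockVector<double> &src) const
    {
      // dst_u = A-tilde^{-1} src_u: one AMG application.
      amg.vmult(dst.block(0), src.block(0));

      // dst_p = S^{-1} (B dst_u - src_p), with S^{-1} approximated by the
      // (suitably scaled) inverse pressure mass matrix.
      dealii::Vector<double> tmp(src.block(1).size());
      B.vmult(tmp, dst.block(0));
      tmp -= src.block(1);
      mp_inverse.vmult(dst.block(1), tmp);
    }

  private:
    const MatrixB          &B;
    const PreconditionerA  &amg;
    const PreconditionerMp &mp_inverse;
  };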

    That said, even though the solver worked well for step-31, we have a problem here that is a bit more complicated (cells are deformed, the pressure varies by orders of magnitude, and we want to plan ahead for more complicated physics), and so we'll change a few things slightly:

    As a final note, let us remark that in step-31 we computed the Schur complement $S=B A^{-1} B^T$ by approximating $-\text{div}(-\eta\Delta)^{-1}\nabla \approx \frac 1{\eta} \mathbf{1}$. Now, however, we have re-scaled the $B$ and $B^T$ operators. So $S$ should now approximate $-\frac{\eta}{L}\text{div}(-\eta\Delta)^{-1}\nabla \frac{\eta}{L} \approx \left(\frac{\eta}{L}\right)^2 \frac 1{\eta} \mathbf{1}$. We use the discrete form of the right hand side of this as our approximation $\tilde S$ to $S$.

    Changes to the artificial viscosity stabilization

    Similarly to step-31, we will use an artificial viscosity for stabilization based on a residual of the equation. As a difference to step-31, we will provide two slightly different definitions of the stabilization parameter. For $\alpha=1$, we use the same definition as in step-31:

\begin{eqnarray*}
  \nu_\alpha(T)|_K
  =
  \nu_1(T)|_K
  =
  \beta
  \|\mathbf{u}\|_{L^\infty(K)}
  h_K
  \min\left\{
    1,
    \frac{\|R_1(T)\|_{L^\infty(K)}}{c(\mathbf{u},T)}
  \right\}
\end{eqnarray*}
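
    Evaluated cell by cell, this definition is plain arithmetic; a minimal sketch might look as follows, where the stabilization constant $\beta$ (not the thermal expansion coefficient) and the scaling $c(\mathbf{u},T)$ enter as precomputed values:

  #include <algorithm>

  // Sketch: evaluate nu_1(T)|_K on one cell K from already-computed
  // quantities; all inputs are placeholders supplied by the caller.
  double compute_nu_1(const double u_infty_K,       // ||u||_{L^inf(K)}
                      const double h_K,             // cell diameter
                      const double residual_infty_K,// ||R_1(T)||_{L^inf(K)}
                      const double beta,            // stabilization constant
                      const double c_uT)            // global scaling c(u,T)
  {
    return beta * u_infty_K * h_K *
           std::min(1.0, residual_infty_K / c_uT);
  }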
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    While this program demonstrates the use of automatic differentiation well, it does not express the state of the art in Euler equation solvers. There are much faster and more accurate methods for this equation, and you should take a look at step-67 and step-69 to see how this equation can be solved more efficiently.

    Introduction

    Euler flow

    The equations that describe the movement of a compressible, inviscid gas (the so-called Euler equations of gas dynamics) are a basic system of conservation laws. In spatial dimension $d$ they read

\[
 \partial_t \mathbf{w} + \nabla \cdot \mathbf{F}(\mathbf{w}) =
 \mathbf{G}(\mathbf w),
\]

    with the solution $\mathbf{w}=(\rho v_1,\ldots,\rho v_d,\rho, E)^{\top}$ consisting of $\rho$ the fluid density, ${\mathbf v}=(v_1,\ldots v_d)^T$ the flow velocity (and thus $\rho\mathbf v$ being the linear momentum density), and $E$ the energy density of the gas. We interpret the equations above as $\partial_t \mathbf{w}_i + \nabla \cdot \mathbf{F}_i(\mathbf{w}) = \mathbf G_i(\mathbf w)$, $i=1,\ldots,dim+2$.

    For the Euler equations, the flux matrix $\mathbf F$ (or system of flux functions) is defined as (shown here for the case $d=3$)

\begin{eqnarray*}
  \mathbf F(\mathbf w)
  =
  \left(
  \begin{array}{ccc}
    \rho v_1^2+p & \rho v_1 v_2 & \rho v_1 v_3 \\
    \rho v_2 v_1 & \rho v_2^2+p & \rho v_2 v_3 \\
    \rho v_3 v_1 & \rho v_3 v_2 & \rho v_3^2+p \\
    \rho v_1 & \rho v_2 & \rho v_3 \\
    (E+p) v_1 & (E+p) v_2 & (E+p) v_3
  \end{array}
  \right),
\end{eqnarray*}

    and we will choose as particular right hand side forcing only the effects of gravity, described by

\begin{eqnarray*}
  \mathbf G(\mathbf w)
  =
  \left(
  \begin{array}{c}
    g_1\rho \\
    g_2\rho \\
    g_3\rho \\
    0 \\
    \rho \mathbf g \cdot \mathbf v
  \end{array}
  \right),
\end{eqnarray*}

    where $\mathbf g=(g_1,g_2,g_3)^T$ denotes the gravity vector. With this, the entire system of equations reads:

\begin{eqnarray*}
  \partial_t (\rho v_i) + \sum_{s=1}^d \frac{\partial(\rho v_i v_s +
  \delta_{is} p)}{\partial x_s} &=& g_i \rho, \qquad i=1,\dots,d, \\
  \partial_t \rho + \sum_{s=1}^d \frac{\partial(\rho v_s)}{\partial x_s} &=& 0,  \\
  \partial_t E + \sum_{s=1}^d \frac{\partial((E+p)v_s)}{\partial x_s} &=&
  \rho \mathbf g \cdot \mathbf v.
\end{eqnarray*}

    These equations describe, respectively, the conservation of momentum, mass, and energy. The system is closed by a relation that defines the pressure: $p = (\gamma -1)(E-\frac{1}{2} \rho |\mathbf v|^2)$. For the constituents of air (mainly nitrogen and oxygen) and other diatomic gases, the ratio of specific heats is $\gamma=1.4$.
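
    To illustrate these definitions, here is a small sketch that computes the pressure and one column of the flux matrix $\mathbf F$ from a conserved-variable vector for $d=3$; it is an illustrative helper in plain doubles, not the tutorial's own code (which evaluates these expressions with Sacado number types):

  #include <array>

  // Conserved variables for d = 3: w = (rho v1, rho v2, rho v3, rho, E).
  constexpr double gas_gamma = 1.4;

  double pressure(const std::array<double, 5> &w)
  {
    const double rho = w[3], E = w[4];
    const double momentum_sq = w[0] * w[0] + w[1] * w[1] + w[2] * w[2];
    // p = (gamma-1)(E - 1/2 rho |v|^2), with 1/2 rho |v|^2 = |rho v|^2/(2 rho)
    return (gas_gamma - 1) * (E - momentum_sq / (2 * rho));
  }

  // Column s (the flux in direction x_s) of F(w):
  std::array<double, 5> flux_column(const std::array<double, 5> &w,
                                    const unsigned int s)
  {
    const double rho = w[3], E = w[4];
    const double p   = pressure(w);
    const double v_s = w[s] / rho;

    std::array<double, 5> f;
    for (unsigned int i = 0; i < 3; ++i)
      f[i] = w[i] * v_s + (i == s ? p : 0.0); // rho v_i v_s + delta_is p
    f[3] = rho * v_s;                         // rho v_s
    f[4] = (E + p) * v_s;                     // (E+p) v_s
    return f;
  }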

    This problem obviously falls into the class of vector-valued problems. A general overview of how to deal with these problems in deal.II can be found in the Handling vector valued problems module.

    Discretization

    Discretization happens in the usual way, taking into account that this is a hyperbolic problem in the same style as the simple one discussed in step-12: We choose a finite element space $V_h$, and integrate our conservation law against our (vector-valued) test function $\mathbf{z} \in V_h$. We then integrate by parts and approximate the boundary flux with a numerical flux $\mathbf{H}$,

\begin{eqnarray*}
 &&\int_{\Omega} (\partial_t \mathbf{w}, \mathbf{z}) + (\nabla \cdot \mathbf{F}(\mathbf{w}), \mathbf{z}) \\
 &\approx &\int_{\Omega} (\partial_t \mathbf{w}, \mathbf{z}) - (\mathbf{F}(\mathbf{w}), \nabla \mathbf{z}) + h^{\eta}(\nabla \mathbf{w} , \nabla \mathbf{z}) + \int_{\partial \Omega} (\mathbf{H}(\mathbf{w}^+, \mathbf{w}^-, \mathbf{n}), \mathbf{z}^+),
\end{eqnarray*}

    where a superscript $+$ denotes the interior trace of a function, and $-$ represents the outer trace. The diffusion term $h^{\eta}(\nabla \mathbf{w} , \nabla \mathbf{z})$ is introduced strictly for stability, where $h$ is the mesh size and $\eta$ is a parameter prescribing how much diffusion to add.

    On the boundary, we have to say what the outer trace $\mathbf{w}^-$ is. Depending on the boundary condition, we prescribe either of the following:

    The output consists of the derivatives $\frac{\partial c(a,b)}{\partial a}, \frac{\partial c(a,b)}{\partial b}$ of $c(a,b)=2a+\cos(ab)$ at $a=1,b=2$.
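
    The computation the preceding sentence refers to was elided in this extract; a minimal sketch of such a Sacado forward-mode computation (written from general knowledge of the Sacado API, not copied from the tutorial) could look like this:

  #include <Sacado.hpp>
  #include <iostream>

  int main()
  {
    using fad_double = Sacado::Fad::DFad<double>;

    // Two independent variables, a = 1 and b = 2, each tracking one of
    // the two derivative components:
    fad_double a = 1, b = 2;
    a.diff(0, 2); // a is independent variable 0 of 2
    b.diff(1, 2); // b is independent variable 1 of 2

    // c(a,b) = 2a + cos(ab); derivatives are carried along automatically.
    const fad_double c = 2.0 * a + cos(a * b);

    std::cout << "c     = " << c.val() << '\n'  // 2 + cos(2)
              << "dc/da = " << c.dx(0) << '\n'  // 2 - b sin(ab) = 2 - 2 sin(2)
              << "dc/db = " << c.dx(1) << '\n'; // -a sin(ab) = -sin(2)
    return 0;
  }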

    It should be noted that Sacado provides more auto-differentiation capabilities than the small subset used in this program. However, understanding the example above is enough to understand the use of Sacado in this Euler flow program.

    Trilinos solvers

    The program uses either the Aztec iterative solvers, or the Amesos sparse direct solver, both provided by the Trilinos package. This package is inherently designed to be used in a parallel program, however, it may be used in serial just as easily, as is done here. The Epetra package is the basic vector/matrix library upon which the solvers are built. This very powerful package can be used to describe the parallel distribution of a vector, and to define sparse matrices that operate on these vectors. Please view the commented code for more details on how these solvers are used within the example.


    Implementation

    The implementation of this program is split into three essential parts:

    Transformations between variables

    Next, we define the gas constant. We will set it to 1.4 in its definition immediately following the declaration of this class (unlike integer variables, like the ones above, static const floating point member variables cannot be initialized within the class declaration in C++). This value of 1.4 is representative of a gas that consists of molecules composed of two atoms, such as air which consists up to small traces almost entirely of $N_2$ and $O_2$.

      static const double gas_gamma;
     
     
    In the following, we will need to compute the kinetic energy and the pressure from a vector of conserved variables. This we can do based on the energy density and the kinetic energy $\frac 12 \rho |\mathbf v|^2 = \frac{|\rho \mathbf v|^2}{2\rho}$ (note that the independent variables contain the momentum components $\rho v_i$, not the velocities $v_i$).
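
    Because these helpers must also work when the conserved variables are Sacado AD types, they are naturally written as templates over the number type. A hedged sketch, with illustrative names and an assumed component ordering of (momenta, density, energy), not the tutorial's exact declarations:

  #include <array>

  // Sketch: Number can be double or a Sacado AD type; W holds
  // (rho v_1, ..., rho v_dim, rho, E).
  template <int dim, typename Number>
  Number compute_kinetic_energy(const std::array<Number, dim + 2> &W)
  {
    Number momentum_sq = W[0] * W[0];
    for (int d = 1; d < dim; ++d)
      momentum_sq += W[d] * W[d];

    // 1/2 rho |v|^2 = |rho v|^2 / (2 rho): uses only the momenta and the
    // density, which are the actual independent variables.
    return momentum_sq / (2 * W[dim]);
  }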

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    Irrotational flow

    The incompressible motion of an inviscid fluid past a body (for example air past an airplane wing, or air or water past a propeller) is usually modeled by the Euler equations of fluid dynamics:

\begin{align*}
  \frac{\partial }{\partial t}\mathbf{v} + (\mathbf{v}\cdot\nabla)\mathbf{v}
  &=
  -\frac{1}{\rho}\nabla p + \mathbf{g}
  &\text{in } \mathbb{R}^n\backslash\Omega,
  \\
  \nabla \cdot \mathbf{v}&=0
  &\text{in } \mathbb{R}^n\backslash\Omega
\end{align*}

    where the fluid density $\rho$ and the acceleration $\mathbf{g}$ due to external forces are given and the velocity $\mathbf{v}$ and the pressure $p$ are the unknowns. Here $\Omega$ is a closed bounded region representing the body around which the fluid moves.

    The above equations can be derived from Navier-Stokes equations assuming that the effects due to viscosity are negligible compared to those due to the pressure gradient, inertial forces and the external forces. This is the opposite case of the Stokes equations discussed in step-22 which are the limit case of dominant viscosity, i.e. where the velocity is so small that inertia forces can be neglected. On the other hand, owing to the assumed incompressibility, the equations are not suited for very high speed gas flows where compressibility and the equation of state of the gas have to be taken into account, leading to the Euler equations of gas dynamics, a hyperbolic system.

    For the purpose of this tutorial program, we will consider only stationary flow without external forces:

\begin{align*}
  (\mathbf{v}\cdot\nabla)\mathbf{v}
  &=
  -\frac{1}{\rho}\nabla p
  &\text{in } \mathbb{R}^n\backslash\Omega,
  \\
  \nabla \cdot \mathbf{v}&=0
  &\text{in } \mathbb{R}^n\backslash\Omega
\end{align*}

    Uniqueness of the solution of the Euler equations is ensured by adding the boundary conditions

\[
  \label{eq:boundary-conditions}
  \begin{aligned}
    \mathbf{n}\cdot\mathbf{v}& = 0 \qquad && \text{ on } \partial\Omega \\
    \mathbf{v}& = \mathbf{v}_\infty && \text{ when } |\mathbf{x}| \to \infty,
  \end{aligned}
\]

    which is to say that the body is at rest in our coordinate system and is not permeable, and that the fluid has (constant) velocity $\mathbf{v}_\infty$ at infinity. An alternative viewpoint is that our coordinate system moves along with the body whereas the background fluid is at rest at infinity. Notice that we define the normal $\mathbf{n}$ as the outer normal to the domain $\Omega$, which is the opposite of the outer normal to the integration domain.

    For both stationary and non-stationary flow, the solution process starts by solving for the velocity in the second equation and substituting it into the first equation in order to find the pressure. The solution of the stationary Euler equations is typically performed in order to understand the behavior of the given (possibly complex) geometry when a prescribed motion is enforced on the system.

    The first step in this process is to change the frame of reference from a coordinate system moving along with the body to one in which the body moves through a fluid that is at rest at infinity. This can be expressed by introducing a new velocity $\mathbf{\tilde{v}}=\mathbf{v}-\mathbf{v}_\infty$ for which we find that the same equations hold (because $\nabla\cdot\mathbf{v}_\infty=0$) and we have boundary conditions

\[
  \label{eq:boundary-conditions-tilde}
  \begin{aligned}
    \mathbf{n}\cdot\mathbf{\tilde{v}}& = -\mathbf{n}\cdot\mathbf{v}_\infty \qquad && \text{ on } \partial\Omega \\
    \mathbf{\tilde{v}}& = 0 && \text{ when } |\mathbf{x}| \to \infty,
  \end{aligned}
\]

    If we assume that the fluid is irrotational, i.e., $\nabla \times \mathbf{v}=0$ in $\mathbb{R}^n\backslash\Omega$, we can represent the velocity, and consequently also the perturbation velocity, as the gradient of a scalar function:

\[
  \mathbf{\tilde{v}}=\nabla\phi,
\]

    and so the second part of the Euler equations above can be rewritten as the homogeneous Laplace equation for the unknown $\phi$:

\begin{align*}
  \label{laplace}
  \Delta\phi &= 0 \qquad &&\text{in}\ \mathbb{R}^n\backslash\Omega,
  \\
  \mathbf{n}\cdot\nabla\phi &= -\mathbf{n}\cdot\mathbf{v}_\infty
  && \text{on}\ \partial\Omega
\end{align*}

    while the momentum equation reduces to Bernoulli's equation that expresses the pressure $p$ as a function of the potential $\phi$:

\[
  \frac{p}{\rho} +\frac{1}{2} | \nabla \phi |^2 = 0 \qquad \text{in}\ \Omega.
\]

    So we can solve the problem by solving the Laplace equation for the potential. We recall that the following functions, called fundamental solutions of the Laplace equation,

\[
  \begin{aligned}
    \label{eq:3}
    G(\mathbf{y}-\mathbf{x}) = &
    -\frac{1}{2\pi}\ln|\mathbf{y}-\mathbf{x}| \qquad && \text{for } n=2 \\
    G(\mathbf{y}-\mathbf{x}) = &
    \frac{1}{4\pi}\frac{1}{|\mathbf{y}-\mathbf{x}|} && \text{for } n=3,
  \end{aligned}
\]

    satisfy in a distributional sense the equation:

\[
  -\Delta_y G(\mathbf{y}-\mathbf{x}) = \delta(\mathbf{y}-\mathbf{x}),
\]

    where the derivative is done in the variable $\mathbf{y}$. By using the usual Green identities, our problem can be written on the boundary $\partial\Omega = \Gamma$ only. We recall the general definition of the second Green identity:

\[
  \label{green}
  \int_{\omega}
  (-\Delta u)v\,dx + \int_{\partial\omega} \frac{\partial u}{\partial \tilde{\mathbf{n}}}v \,ds
  =
  \int_{\omega}
  (-\Delta v)u\,dx + \int_{\partial\omega} u\frac{\partial v}{\partial \tilde{\mathbf{n}}} \,ds,
\]

    where $\tilde{\mathbf{n}}$ is the normal to the surface of $\omega$ pointing outwards from the domain of integration $\omega$.

    In our case the domain of integration is the domain $\mathbb{R}^n\backslash\Omega$, whose boundary is $\Gamma_\infty \cup \Gamma$, where the "boundary" at infinity is defined as

\[
  \Gamma_\infty \dealcoloneq \lim_{r\to\infty} \partial B_r(0).
\]

    In our program the normals are defined as outer to the domain $\Omega$, that is, they are in fact inner to the integration domain, and some care is required in defining the various integrals with the correct signs for the normals, i.e. replacing $\tilde{\mathbf{n}}$ by $-\mathbf{n}$.

    If we substitute $u$ and $v$ in the Green identity with the solution $\phi$ and with the fundamental solution of the Laplace equation respectively, as long as $\mathbf{x}$ is chosen in the region $\mathbb{R}^n\backslash\Omega$, we obtain:

\[
  \phi(\mathbf{x}) -
  \int_{\Gamma\cup\Gamma_\infty}\frac{\partial G(\mathbf{y}-\mathbf{x})}{\partial \mathbf{n}_y}\phi(\mathbf{y})\,ds_y
  =
  -\int_{\Gamma\cup\Gamma_\infty}G(\mathbf{y}-\mathbf{x})\frac{\partial \phi}{\partial \mathbf{n}_y}(\mathbf{y})\,ds_y
  \qquad \forall\mathbf{x}\in \mathbb{R}^n\backslash\Omega
\]

    where the normals now point into the domain of integration.

    Notice that in the above equation, we also have the integrals on the portion of the boundary at $\Gamma_\infty$. Using the boundary conditions of our problem, we have that $\nabla \phi$ is zero at infinity (which simplifies the integral on $\Gamma_\infty$ on the right hand side).

    The integral on $\Gamma_\infty$ that appears on the left hand side can be treated by observing that $\nabla\phi=0$ implies that $\phi$ at infinity is necessarily constant. We define its value to be $\phi_\infty$. It is an easy exercise to prove that

\[
  -\int_{\Gamma_\infty} \frac{\partial G(\mathbf{y}-\mathbf{x})}
  {\partial \mathbf{n}_y}\phi_\infty \,ds_y =
  \lim_{r\to\infty} \int_{\partial B_r(0)} \frac{\mathbf{r}}{r} \cdot \nabla G(\mathbf{y}-\mathbf{x})
  \phi_\infty \,ds_y = -\phi_\infty.
\]

    Using this result, we can reduce the above equation to an equation on the boundary $\Gamma$ only, using the so-called Single and Double Layer Potential operators:

\[
  \label{integral}
  \phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty
  -\left(S \frac{\partial \phi}{\partial n_y}\right)(\mathbf{x})
  \qquad \forall\mathbf{x}\in \mathbb{R}^n\backslash\Omega.
\]

    (The name of these operators comes from the fact that they describe the electric potential in $\mathbb{R}^n$ due to a single thin sheet of charges along a surface, and due to a double sheet of charges and anti-charges along the surface, respectively.)

    In our case, we know the Neumann values of $\phi$ on the boundary: $\mathbf{n}\cdot\nabla\phi = -\mathbf{n}\cdot\mathbf{v}_\infty$. Consequently,

\[
  \phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty +
   \left(S[\mathbf{n}\cdot\mathbf{v}_\infty]\right)(\mathbf{x})
   \qquad \forall\mathbf{x} \in \mathbb{R}^n\backslash\Omega.
\]

    If we take the limit for $\mathbf{x}$ tending to $\Gamma$ of the above equation, using well known properties of the single and double layer operators, we obtain an equation for $\phi$ just on the boundary $\Gamma$ of $\Omega$:

\[
  \label{SD}
  \alpha(\mathbf{x})\phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty +
  \left(S [\mathbf{n}\cdot\mathbf{v}_\infty]\right)(\mathbf{x})
  \quad \mathbf{x}\in \partial\Omega,
\]

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html 2024-01-30 03:04:52.520881982 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html 2024-01-30 03:04:52.520881982 +0000 @@ -139,57 +139,57 @@

    Introduction

    Motivation

    The purpose of this program is to show how to effectively solve the incompressible time-dependent Navier-Stokes equations. These equations describe the flow of a viscous incompressible fluid and read

\begin{align*}
  u_t + u \cdot \nabla u - \nu \Delta u + \nabla p = f, \\
  \nabla \cdot u = 0,
\end{align*}

    where $u$ represents the velocity of the flow and $p$ the pressure. This system of equations is supplemented by the initial condition

\[
  u |_{t=0} = u_0,
\]

    with $u_0$ sufficiently smooth and solenoidal, and suitable boundary conditions. For instance, an admissible boundary condition is

\[
  u|_{\partial\Omega} = u_b.
\]

    It is possible to prescribe other boundary conditions as well. In the test case that we solve here the boundary is partitioned into two disjoint subsets $\partial\Omega = \Gamma_1 \cup \Gamma_2$ and we have

\[
  u|_{\Gamma_1} = u_b,
\]

    and

\[
  u\times n|_{\Gamma_2} = 0, \quad p|_{\Gamma_2} = 0
\]

    where $n$ is the outer unit normal. The boundary conditions on $\Gamma_2$ are often used to model outflow conditions.

    In previous tutorial programs (see for instance step-20 and step-22) we have seen how to solve the time-independent Stokes equations using a Schur complement approach. For the time-dependent case, after time discretization, we would arrive at a system like

\begin{align*}
  \frac1\tau u^k - \nu \Delta u^k + \nabla p^k = F^k, \\
  \nabla \cdot u^k = 0,
\end{align*}

    where $\tau$ is the time-step. Although the structure of this system is similar to the Stokes system and thus it could be solved using a Schur complement approach, it turns out that the condition number of the Schur complement is proportional to $\tau^{-2}$. This makes the system very difficult to solve, and means that for the Navier-Stokes equations, this is not a useful avenue to the solution.

    Projection methods

    Rather, we need to come up with a different approach to solve the time-dependent Navier-Stokes equations. The difficulty in their solution comes from the fact that the velocity and the pressure are coupled through the constraint

\[
  \nabla \cdot u = 0,
\]

    for which the pressure is the Lagrange multiplier. Projection methods aim at decoupling this constraint from the diffusion (Laplace) operator.


    Let us briefly describe what the projection methods look like in a semi-discrete setting. The objective is to obtain a sequence of velocities $\{u^k\}$ and pressures $\{p^k\}$. We will also obtain a sequence $\{\phi^k\}$ of auxiliary variables. Suppose that from the initial conditions, and an application of a first order method, we have found $(u^0,p^0,\phi^0=0)$ and $(u^1,p^1,\phi^1=p^1-p^0)$. Then the projection method consists of the following steps:

    The equation to solve here is as follows:

\begin{align*}
  -\nabla \cdot a(\mathbf x) \nabla u(\mathbf x) &= 1 \qquad\qquad & \text{in}\ \Omega,
  \\
  u &= 0 \qquad\qquad & \text{on}\ \partial\Omega.
\end{align*}

    If $a(\mathbf x)$ was a constant coefficient, this would simply be the Poisson equation. However, if it is indeed spatially variable, it is a more complex equation (often referred to as the "extended Poisson equation"). Depending on what the variable $u$ refers to, it models a variety of situations with wide applicability:

    Since the Laplace/Poisson equation appears in so many contexts, there are many more interpretations than just the two listed above.

    When assembling the linear system for this equation, we need the weak form which here reads as follows:

\begin{align*}
  (a \nabla \varphi, \nabla u) &= (\varphi, 1) \qquad \qquad \forall \varphi.
\end{align*}

    The implementation in the assemble_system function follows immediately from this.
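    For concreteness, such an assemble_system() loop might look like the following sketch (standard step-4 style objects; `coefficient` stands in for $a(\mathbf x)$, and the free-function signature is an assumption, not the program's code):

  #include <deal.II/base/function.h>
  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/fe/fe_values.h>
  #include <deal.II/lac/affine_constraints.h>
  #include <deal.II/lac/full_matrix.h>
  #include <deal.II/lac/sparse_matrix.h>
  #include <deal.II/lac/vector.h>

  using namespace dealii;

  template <int dim>
  void assemble_system(const DoFHandler<dim>           &dof_handler,
                       const Function<dim>             &coefficient, // a(x)
                       const AffineConstraints<double> &constraints,
                       SparseMatrix<double>            &system_matrix,
                       Vector<double>                  &system_rhs)
  {
    const FiniteElement<dim> &fe = dof_handler.get_fe();
    QGauss<dim>               quadrature(fe.degree + 1);
    FEValues<dim> fe_values(fe,
                            quadrature,
                            update_values | update_gradients |
                              update_quadrature_points | update_JxW_values);

    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
    Vector<double>     cell_rhs(dofs_per_cell);
    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        fe_values.reinit(cell);
        cell_matrix = 0;
        cell_rhs    = 0;

        for (const unsigned int q : fe_values.quadrature_point_indices())
          {
            const double a_q =
              coefficient.value(fe_values.quadrature_point(q));
            for (const unsigned int i : fe_values.dof_indices())
              {
                for (const unsigned int j : fe_values.dof_indices())
                  cell_matrix(i, j) += a_q *                        // a(x_q)
                                       fe_values.shape_grad(i, q) * // grad phi_i
                                       fe_values.shape_grad(j, q) * // grad phi_j
                                       fe_values.JxW(q);
                cell_rhs(i) += fe_values.shape_value(i, q) // phi_i
                               * 1.0                       // f = 1
                               * fe_values.JxW(q);
              }
          }

        cell->get_dof_indices(local_dof_indices);
        constraints.distribute_local_to_global(
          cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
      }
  }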

    The commented program

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html 2024-01-30 03:04:53.712891913 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html 2024-01-30 03:04:53.712891913 +0000 @@ -154,14 +154,14 @@

\begin{align*}
  (\epsilon \nabla u, \nabla v) = (f,v) \quad \forall v \in V_h
\end{align*}

    on the domain $\Omega = [-1,1]^\text{dim} \setminus [0,1]^\text{dim}$ (an L-shaped domain for 2D and a Fichera corner for 3D) with $\epsilon = 1$ if $\min(x,y,z)>-\frac{1}{2}$ and $\epsilon = 100$ otherwise. In other words, $\epsilon$ is small along the edges or faces of the domain that run into the reentrant corner, as will be visible in the figure below.

    The boundary conditions are $u=0$ on the whole boundary and the right-hand side is $f=1$. We use continuous $Q_2$ elements for the discrete finite element space $V_h$, and use a residual-based, cell-wise a posteriori error estimator $e(K) = e_{\text{cell}}(K) + e_{\text{face}}(K)$ from [karakashian2003posteriori] with

    \begin{align*}
  e_{\text{cell}}(K) &= h^2 \| f + \epsilon \triangle u \|_K^2, \\
  e_{\text{face}}(K) &= \sum_F h_F \| \jump{ \epsilon \nabla u \cdot n } \|_F^2,
 \end{align*}

    to adaptively refine the mesh. (This is a generalization of the Kelly error estimator used in the KellyErrorEstimator class that drives mesh refinement in most of the other tutorial programs.) The following figure visualizes the solution and refinement for 2D: In 3D, the solution looks similar (see below). On the left you can see the solution and on the right we show a slice for $x$ close to the center of the domain showing the adaptively refined mesh.
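    The estimate-mark-refine step driven by such an estimator typically looks like the following sketch (written here with the KellyErrorEstimator class mentioned above and a serial triangulation; the distributed case goes through parallel::distributed::GridRefinement instead):

  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
  KellyErrorEstimator<dim>::estimate(dof_handler,
                                     QGauss<dim - 1>(fe.degree + 1),
                                     {}, // no Neumann boundary data
                                     solution,
                                     estimated_error_per_cell);
  GridRefinement::refine_and_coarsen_fixed_number(triangulation,
                                                  estimated_error_per_cell,
                                                  0.30,  // refine top 30%
                                                  0.03); // coarsen bottom 3%
  triangulation.execute_coarsening_and_refinement();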

    @@ -171,7 +171,7 @@

    For the active mesh, we use the parallel::distributed::Triangulation class as done in step-40 which uses functionality in the external library p4est for the distribution of the active cells among processors. For the non-active cells in the multilevel hierarchy, deal.II implements what we will refer to as the "first-child rule" where, for each cell in the hierarchy, we recursively assign the parent of a cell to the owner of the first child cell. The following figures give an example of such a distribution. Here the left image represents the active cells for a sample 2D mesh partitioned using a space-filling curve (which is what p4est uses to partition cells); the center image gives the tree representation of the active mesh; and the right image gives the multilevel hierarchy of cells. The colors and numbers represent the different processors. The circular nodes in the tree are the non-active cells which are distributed using the "first-child rule".

    Included among the output to screen in this example is a value "Partition efficiency" given by one over MGTools::workload_imbalance(). This value, which will be denoted by $\mathbb{E}$, quantifies the overhead produced by not having a perfect work balance on each level of the multigrid hierarchy. This imbalance is evident from the example above: while level $\ell=2$ is about as well balanced as is possible with four cells among three processors, the coarse level $\ell=0$ has work for only one processor, and level $\ell=1$ has work for only two processors of which one has three times as much work as the other.

    For defining $\mathbb{E}$, it is important to note that, as we are using local smoothing to define the multigrid hierarchy (see the multigrid paper for a description of local smoothing), the refinement level of a cell corresponds to that cell's multigrid level. Now, let $N_{\ell}$ be the number of cells on level $\ell$ (both active and non-active cells) and $N_{\ell,p}$ be the subset owned by process $p$. We will also denote by $P$ the total number of processors. Assuming that the workload for any one processor is proportional to the number of cells owned by that processor, the optimal workload per processor is given by

\begin{align*}
  W_{\text{opt}} = \frac1{P}\sum_{\ell} N_{\ell} = \sum_{\ell}\left(\frac1{P}\sum_{p}N_{\ell,p}\right).
\end{align*}

    @@ -204,7 +204,7 @@
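    A back-of-the-envelope way to evaluate this model: given per-level, per-process cell counts $N_{\ell,p}$, and assuming (as in [clevenger_par_gmg]) that the actual work is $W=\sum_\ell \max_p N_{\ell,p}$, the efficiency $\mathbb{E}=W_{\text{opt}}/W$ can be computed as in this sketch (not deal.II's implementation):

  #include <algorithm>
  #include <numeric>
  #include <vector>

  // N[l][p] = number of cells owned by process p on level l.
  double partition_efficiency(const std::vector<std::vector<double>> &N)
  {
    double w_opt = 0, w = 0;
    for (const auto &level : N)
      {
        w_opt += std::accumulate(level.begin(), level.end(), 0.0) /
                 level.size();                              // ideal share
        w += *std::max_element(level.begin(), level.end()); // bottleneck
      }
    return w_opt / w; // = 1 / workload imbalance
  }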

    These sorts of considerations are discussed in much greater detail in [clevenger_par_gmg], which contains a full discussion of the partition efficiency model and the effect the imbalance has on the GMG V-cycle timing. In summary, the value of $\mathbb{E}$ is highly dependent on the degree of local mesh refinement used and has an optimal value $\mathbb{E} \approx 1$ for globally refined meshes. Typically for adaptively refined meshes, the number of processors used to distribute a single mesh has a negative impact on $\mathbb{E}$ but only up to a leveling off point, where the imbalance remains relatively constant for an increasing number of processors, and further refinement has very little impact on $\mathbb{E}$. Finally, $1/\mathbb{E}$ was shown to give an accurate representation of the slowdown in parallel scaling expected for the timing of a V-cycle.

    It should be noted that there is potential for some asynchronous work between multigrid levels, specifically with purely nearest neighbor MPI communication, and an adaptive mesh could be constructed such that the efficiency model would far overestimate the V-cycle slowdown due to the asynchronous work "covering up" the imbalance (which assumes synchronization over levels). However, for most realistic adaptive meshes the expectation is that this asynchronous work will only cover up a very small portion of the imbalance and the efficiency model will describe the slowdown very well.

    Workload imbalance for algebraic multigrid methods

    The considerations above show that one has to expect certain limits on the scalability of the geometric multigrid algorithm as it is implemented in deal.II because even in cases where the finest levels of a mesh are perfectly load balanced, the coarser levels may not be. At the same time, the coarser levels are weighted less (the contributions of $W_\ell$ to $W$ are small) because coarser levels have fewer cells and, consequently, do not contribute to the overall run time as much as finer levels. In other words, imbalances in the coarser levels may not lead to large effects in the big picture.

    Algebraic multigrid methods are of course based on an entirely different approach to creating a hierarchy of levels. In particular, they create these purely based on analyzing the system matrix, and very sophisticated algorithms for ensuring that the problem is well load-balanced on every level are implemented in both the hypre and ML/MueLu packages that underlie the TrilinosWrappers::PreconditionAMG and PETScWrappers::PreconditionBoomerAMG classes. In some sense, these algorithms are simpler than for geometric multigrid methods because they only deal with the matrix itself, rather than all of the connotations of meshes, neighbors, parents, and other geometric entities. At the same time, much work has also been put into making algebraic multigrid methods scale to very large problems, including questions such as reducing the number of processors that work on a given level of the hierarchy to a subset of all processors, if otherwise processors would spend less time on computations than on communication. (One might note that it is of course possible to implement these same kinds of ideas also in geometric multigrid algorithms where one purposefully idles some processors on coarser levels to reduce the amount of communication. deal.II just doesn't do this at this time.)

    These are not considerations we typically have to worry about here, however: For most purposes, we use algebraic multigrid methods as black-box methods.

    Running the program

    @@ -1079,9 +1079,9 @@

    The result is a function that is similar to the one found in the "Use FEEvaluation::read_dof_values_plain() to avoid resolving constraints" subsection in the "Possibilities for extensions" section of step-37.

    The reason for this function is that the MatrixFree operators do not take into account non-homogeneous Dirichlet constraints, instead treating all Dirichlet constraints as homogeneous. To account for this, the right-hand side here is assembled as the residual $r_0 = f-Au_0$, where $u_0$ is a zero vector except in the Dirichlet values. Then when solving, we have that the solution is $u = u_0 + A^{-1}r_0$. This can be seen as a Newton iteration on a linear system with initial guess $u_0$. The CG solve in the solve() function below computes $A^{-1}r_0$ and the call to constraints.distribute() (which directly follows) adds the $u_0$.
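    Spelled out as formulas, the update just described reads

\begin{align*}
  r_0 &= f - A u_0, \\
  \delta u &= A^{-1} r_0 \qquad \text{(the CG solve below)}, \\
  u &= u_0 + \delta u \qquad \text{(added by constraints.distribute()).}
\end{align*}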

    Obviously, since we are considering a problem with zero Dirichlet boundary, we could have taken a similar approach to step-37 assemble_rhs(), but this additional work allows us to change the problem declaration if we so choose.

    This function has two parts in the integration loop: applying the negative of matrix $A$ to $u_0$ by submitting the negative of the gradient, and adding the right-hand side contribution by submitting the value $f$. We must be sure to use read_dof_values_plain() for evaluating $u_0$ as read_dof_values() would set all Dirichlet values to zero.

    Finally, the system_rhs vector is of type LA::MPI::Vector, but the MatrixFree class only work for LinearAlgebra::distributed::Vector. Therefore we must compute the right-hand side using MatrixFree functionality and then use the functions in the ChangeVectorType namespace to copy it to the correct type.

      template <int dim, int degree>
      void LaplaceProblem<dim, degree>::assemble_rhs()
    @@ -1860,7 +1860,7 @@

    Here, the timing of the solve() function is split into 3 parts: setting up the multigrid preconditioner, execution of a single multigrid V-cycle, and the CG solver. The V-cycle that is timed is unnecessary for the overall solve and only meant to give an insight into the different costs for AMG and GMG. It should also be noted that when using the AMG solver, "Workload imbalance" is not included in the output since the hierarchy of coarse meshes is not required.

    All results in this section are gathered on Intel Xeon Platinum 8280 (Cascade Lake) nodes which have 56 cores and 192GB per node and support AVX-512 instructions, allowing for vectorization over 8 doubles (vectorization used only in the matrix-free computations). The code is compiled using gcc 7.1.0 with intel-mpi 17.0.3. Trilinos 12.10.1 is used for the matrix-based GMG/AMG computations.

    We can then gather a variety of information by calling the program with the input files that are provided in the directory in which step-50 is located. Using these, and adjusting the number of mesh refinement steps, we can produce information about how well the program scales.

    The following table gives weak scaling timings for this program on up to 256M DoFs and 7,168 processors. (Recall that weak scaling keeps the number of degrees of freedom per processor constant while increasing the number of processors; i.e., it considers larger and larger problems.) Here, $\mathbb{E}$ is the partition efficiency from the introduction (also equal to 1.0/workload imbalance), "Setup" is a combination of setup, setup multigrid, assemble, and assemble multigrid from the timing blocks, and "Prec" is the preconditioner setup. Ideally all times would stay constant over each problem size for the individual solvers, but since the partition efficiency decreases from 0.371 to 0.161 from largest to smallest problem size, we expect to see an approximately $0.371/0.161=2.3$ times increase in timings for GMG. This is, in fact, pretty close to what we really get:

    @@ -1875,8 +1875,8 @@
    MF-GMG MB-GMG AMG
    7,168 19 256M 0.16 1.214 0.893 0.521 2.628 2.386 7.260 2.560 12.206 1.844 1.010 1.890 4.744
    On the other hand, the algebraic multigrid in the last set of columns is relatively unaffected by the increasing imbalance of the mesh hierarchy (because it doesn't use the mesh hierarchy) and the growth in time is rather driven by other factors that are well documented in the literature (most notably that the algorithmic complexity of some parts of algebraic multigrid methods appears to be ${\cal O}(N \log N)$ instead of ${\cal O}(N)$ for geometric multigrid).

    The upshot of the table above is that the matrix-free geometric multigrid method appears to be the fastest approach to solving this equation, if not by a huge margin. Matrix-based methods, on the other hand, are consistently the worst.

    The following figure provides strong scaling results for each method, i.e., we solve the same problem on more and more processors. Specifically, we consider the problems after 16 mesh refinement cycles (32M DoFs) and 19 cycles (256M DoFs), on between 56 to 28,672 processors:

    @@ -1886,7 +1886,7 @@

    The finite element degree is currently hard-coded as 2; see the template arguments of the main class. It is easy to change. To test, it would be interesting to switch to a test problem with a reference solution. This way, you can compare error rates.

    Coarse solver

    A more interesting example would involve a more complicated coarse mesh (see step-49 for inspiration). The issue in that case is that the coarsest level of the mesh hierarchy is actually quite large, and one would have to think about ways to solve the coarse level problem efficiently. (This is not an issue for algebraic multigrid methods because they would just continue to build coarser and coarser levels of the matrix, regardless of their geometric origin.)

    In the program here, we simply solve the coarse level problem with a Conjugate Gradient method without any preconditioner. That is acceptable if the coarse problem is really small – for example, if the coarse mesh had a single cell, then the coarse mesh problem has a $9\times 9$ matrix in 2d, and a $27\times 27$ matrix in 3d; for the coarse mesh we use on the $L$-shaped domain of the current program, these sizes are $21\times 21$ in 2d and $117\times 117$ in 3d. But if the coarse mesh consists of hundreds or thousands of cells, this approach will no longer work and might start to dominate the overall run-time of each V-cycle. A common approach is then to solve the coarse mesh problem using an algebraic multigrid preconditioner; this would then, however, require assembling the coarse matrix (even for the matrix-free version) as input to the AMG implementation.
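    In deal.II terms, such an unpreconditioned coarse CG solve can be set up roughly as in the following sketch (LevelMatrixType and mg_matrices[0] stand for the level-operator type and the level-0 operator and are assumed from context):

  using VectorType = LinearAlgebra::distributed::Vector<double>;

  SolverControl        coarse_control(1000, 1e-12);
  SolverCG<VectorType> coarse_cg(coarse_control);
  PreconditionIdentity identity;

  MGCoarseGridIterativeSolver<VectorType,
                              SolverCG<VectorType>,
                              LevelMatrixType,
                              PreconditionIdentity>
    coarse_grid_solver(coarse_cg, mg_matrices[0], identity);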

    The plain program

    /* ---------------------------------------------------------------------
    *
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html 2024-01-30 03:04:53.804892680 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html 2024-01-30 03:04:53.804892680 +0000 @@ -157,7 +157,7 @@

    Introduction

    This tutorial program presents the implementation of a hybridizable discontinuous Galerkin method for the convection-diffusion equation.

    Hybridizable discontinuous Galerkin methods

    One common argument against the use of discontinuous Galerkin elements is the large number of globally coupled degrees of freedom that one must solve in an implicit system. This is because, unlike continuous finite elements, in typical discontinuous elements there is one degree of freedom at each vertex for each of the adjacent elements, rather than just one, and similarly for edges and faces. As an example of how fast the number of unknowns grows, consider the FE_DGPMonomial basis: each scalar solution component is represented by polynomials of degree $p$ with $(1/\text{dim}!) \prod_{i=1}^{\text{dim}}(p+i)$ degrees of freedom per element. Typically, all degrees of freedom in an element are coupled to all of the degrees of freedom in the adjacent elements. The resulting discrete equations yield very large linear systems very quickly, especially for systems of equations in 2 or 3 dimensions.

    Reducing the size of the linear system

    To alleviate the computational cost of solving such large linear systems, the hybridizable discontinuous Galerkin (HDG) methodology was introduced by Cockburn and co-workers (see the references in the recent HDG overview article by Nguyen and Peraire [Ngu2012]).

    The HDG method achieves this goal by formulating the mathematical problem using Dirichlet-to-Neumann mappings. The partial differential equations are first written as a first order system, and each field is then discretized via a DG method. At this point, the single-valued "trace" values on the skeleton of the mesh, i.e., element faces, are taken to be independent unknown quantities. This yields unknowns in the discrete formulation that fall into two categories:

    The Diffusion class

    The next piece is the declaration of the main class. Most of the functions in this class are not new and have been explained in previous tutorials. The only interesting functions are evaluate_diffusion() and id_minus_tau_J_inverse(). evaluate_diffusion() evaluates the diffusion equation, $M^{-1}(f(t,y))$, at a given time and a given $y$. id_minus_tau_J_inverse() evaluates $\left(I-\tau M^{-1} \frac{\partial f(t,y)}{\partial y}\right)^{-1}$ or equivalently $\left(M-\tau \frac{\partial f}{\partial y}\right)^{-1} M$ at a given time, for a given $\tau$ and $y$. This function is needed when an implicit method is used.

      class Diffusion
      {
      public:
    @@ -424,8 +424,8 @@

    Diffusion::assemble_system

    In this function, we compute $-\int D \nabla b_i \cdot \nabla b_j d\boldsymbol{r} - \int \Sigma_a b_i b_j d\boldsymbol{r}$ and the mass matrix $\int b_i b_j d\boldsymbol{r}$. The mass matrix is then inverted using a direct solver; the inverse_mass_matrix variable will then store the inverse of the mass matrix so that $M^{-1}$ can be applied to a vector using the vmult() function of that object. (Internally, UMFPACK does not really store the inverse of the matrix, but its LU factors; applying the inverse matrix is then equivalent to doing one forward and one backward solve with these two factors, which has the same complexity as applying an explicit inverse of the matrix.)

      void Diffusion::assemble_system()
      {
      system_matrix = 0.;
    @@ -513,8 +513,8 @@
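    As an aside, the vmult()-based application of $M^{-1}$ described above typically reduces to two calls on deal.II's UMFPACK wrapper, as in this sketch:

  SparseDirectUMFPACK inverse_mass_matrix;
  inverse_mass_matrix.initialize(mass_matrix); // compute the LU factors once

  // Applying the "inverse" is one forward and one backward substitution:
  inverse_mass_matrix.vmult(dst, src); // dst = M^{-1} src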

    Diffusion::evaluate_diffusion

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 2024-01-30 03:04:53.904893513 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 2024-01-30 03:04:53.904893513 +0000 @@ -142,18 +142,18 @@

    To illustrate how one describes geometries using charts in deal.II, we will consider a case that originates in an application of the ASPECT mantle convection code, using a data set provided by D. Sarah Stamps. In the concrete application, we were interested in describing flow in the Earth mantle under the East African Rift, a zone where two continental plates drift apart. Not to beat around the bush, the geometry we want to describe looks like this:

    In particular, though you cannot see this here, the top surface is not just colored by the elevation but is, in fact, deformed to follow the correct topography. While the actual application is not relevant here, the geometry is. The domain we are interested in is a part of the Earth that ranges from the surface to a depth of 500km, from 26 to 35 degrees East of the Greenwich meridian, and from 5 degrees North of the equator to 10 degrees South.

    This description of the geometry suggests to start with a box $\hat U=[26,35]\times[-10,5]\times[-500000,0]$ (measured in degrees, degrees, and meters) and to provide a map $\varphi$ so that $\varphi^{-1}(\hat U)=\Omega$ where $\Omega$ is the domain we seek. $(\Omega,\varphi)$ is then a chart, $\varphi$ the pull-back operator, and $\varphi^{-1}$ the push-forward operator. If we need a point $q$ that is the "average" of other points $q_i\in\Omega$, the ChartManifold class then first applies the pull-back to obtain $\hat q_i=\varphi(q_i)$, averages these to a point $\hat p$ and then computes $p=\varphi^{-1}(\hat p)$.

    Our goal here is therefore to implement a class that describes $\varphi$ and $\varphi^{-1}$. If Earth was a sphere, then this would not be difficult: if we denote by $(\hat \phi,\hat \theta,\hat d)$ the points of $\hat U$ (i.e., longitude counted eastward, latitude counted northward, and elevation relative to zero depth), then

\[
  \mathbf x = \varphi^{-1}(\hat \phi,\hat \theta,\hat d)
  = (R+\hat d) (\cos\hat \phi\cos\hat \theta, \sin\hat \phi\cos\hat \theta, \sin\hat \theta)^T
\]

    provides coordinates in a Cartesian coordinate system, where $R$ is the radius of the sphere. However, the Earth is not a sphere:

    1. It is flattened at the poles and larger at the equator: the semi-major axis is approximately 22km longer than the semi-minor axis. We will account for this using the WGS 84 reference standard for the Earth shape. The formula used in WGS 84 to obtain a position in Cartesian coordinates from longitude, latitude, and elevation is

  \[
    \mathbf x = \varphi_\text{WGS84}^{-1}(\phi,\theta,d)
    = \left(
      \begin{array}{c}
        (\bar R(\theta)+d) \cos\phi\cos\theta \\
        (\bar R(\theta)+d) \sin\phi\cos\theta \\
        ((1-e^2)\bar R(\theta)+d) \sin\theta
      \end{array}
      \right),
  \]

      where $\bar R(\theta)=\frac{R}{\sqrt{1-(e \sin\theta)^2}}$, and radius and ellipticity are given by $R=6378137\text{m}, e=0.081819190842622$. In this formula, we assume that the arguments to sines and cosines are evaluated in degrees, not radians (though we will have to change this assumption in the code).

    2. It has topography in the form of mountains and valleys. We will account for this using real topography data (see below for a description of where this data comes from). Using this data set, we can look up elevations on a latitude-longitude mesh laid over the surface of the Earth. Starting with the box $\hat U=[26,35]\times[-10,5]\times[-500000,0]$, we will therefore first stretch it in vertical direction before handing it off to the WGS 84 function: if $h(\hat\phi,\hat\theta)$ is the height at longitude $\hat\phi$ and latitude $\hat\theta$, then we define

  \[
    (\phi,\theta,d) =
    \varphi_\text{topo}^{-1}(\hat\phi,\hat\theta,\hat d)
    = \left(
        \hat\phi,
        \hat\theta,
        \hat d + \frac{\hat d+500000}{500000}h(\hat\phi,\hat\theta)
      \right).
  \]

      Using this function, the top surface of the box $\hat U$ is displaced to the correct topography, the bottom surface remains where it was, and points in between are linearly interpolated.

    Using these two functions, we can then define the entire push-forward function $\varphi^{-1}: \hat U \rightarrow \Omega$ as

\[
  \mathbf x
  =
  \varphi^{-1}(\hat\phi,\hat\theta,\hat d)
  =
  \varphi_\text{WGS84}^{-1}(\varphi_\text{topo}^{-1}(\hat\phi,\hat\theta,\hat d)).
\]

    In addition, we will have to define the inverse of this function, the pull-back operation, which we can write as

\[
  (\hat\phi,\hat\theta,\hat d)
  =
  \varphi(\mathbf x)
  =
  \varphi_\text{topo}(\varphi_\text{WGS84}(\mathbf x)).
\]

    We can obtain one of the components of this function by inverting the formula above:

\[
  (\hat\phi,\hat\theta,\hat d) =
  \varphi_\text{topo}(\phi,\theta,d)
  = \left(
      \phi,
      \theta,
      500000\frac{d-h(\phi,\theta)}{500000+h(\phi,\theta)}
    \right).
\]

    Computing $\varphi_\text{WGS84}(\mathbf x)$ is also possible though a lot more awkward. We won't show the formula here but instead only provide the implementation in the program.

    Implementation

There are a number of issues we need to address in the program. At the largest scale, we need to write a class that implements the interface of ChartManifold. This involves a function push_forward() that takes a point in the reference domain $\hat U$ and transforms it into real space using the function $\varphi^{-1}$ outlined above, and its inverse function pull_back() implementing $\varphi$. We will do so in the AfricaGeometry class below that looks, in essence, like this:

    class AfricaGeometry : public ChartManifold<3,3>
    {
    public:
      virtual Point<3> pull_back(const Point<3> &space_point) const override;

      virtual Point<3> push_forward(const Point<3> &chart_point) const override;
    };

    The transformations above have two parts: the WGS 84 transformations and the topography transformation. Consequently, the AfricaGeometry class will have additional (non-virtual) member functions AfricaGeometry::push_forward_wgs84() and AfricaGeometry::push_forward_topo() that implement these two pieces, and corresponding pull back functions.
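Since the overall maps are just compositions of these two pieces, the virtual interface functions reduce to one-liners. The following is a minimal sketch; the names pull_back_wgs84() and pull_back_topo() are our assumed names for the "corresponding pull back functions", not necessarily the program's:

    Point<3> AfricaGeometry::push_forward(const Point<3> &chart_point) const
    {
      // x = phi_WGS84^{-1}( phi_topo^{-1}( phi-hat, theta-hat, d-hat ) )
      return push_forward_wgs84(push_forward_topo(chart_point));
    }

    Point<3> AfricaGeometry::pull_back(const Point<3> &space_point) const
    {
      // The pull back applies the two inverse maps in the reverse order:
      // (phi-hat, theta-hat, d-hat) = phi_topo( phi_WGS84( x ) )
      return pull_back_topo(pull_back_wgs84(space_point));
    }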

The WGS 84 transformation functions are not particularly interesting (even though the formulas they implement are impressive). The more interesting part is the topography transformation. Recall that for this, we needed to evaluate the elevation function $h(\hat\phi,\hat\theta)$. There is of course no formula for this: Earth is what it is, and the best one can do is look up the altitude from some table. This is, in fact, what we will do.

    The data we use was originally created by the Shuttle Radar Topography Mission, was downloaded from the US Geologic Survey (USGS) and processed by D. Sarah Stamps who also wrote the initial version of the WGS 84 transformation functions. The topography data so processed is stored in a file topography.txt.gz that, when unpacked looks like this:

    6.983333 25.000000 700
    6.983333 25.016667 692
    6.983333 25.033333 701
    ...
    -11.983333 35.966667 687
    -11.983333 35.983333 659

The data is formatted as "latitude longitude elevation", where the first two columns are provided in degrees North of the equator and degrees East of the Greenwich meridian. The final column is given in meters above the WGS 84 zero elevation.

In the transformation functions, we need to evaluate $h(\hat\phi,\hat\theta)$ for a given longitude $\hat\phi$ and latitude $\hat\theta$. In general, this data point will not be available and we will have to interpolate between adjacent data points. Writing such an interpolation routine is not particularly difficult, but it is a bit tedious and error prone. Fortunately, we can somehow shoehorn this data set into an existing class: Functions::InterpolatedUniformGridData. Unfortunately, the class does not fit the bill quite exactly and so we need to work around it a bit. The problem comes from the way we initialize this class: in its simplest form, it takes a stream of values that it assumes form an equispaced mesh in the $x-y$ plane (or, here, the $\phi-\theta$ plane). Which is what they do here, sort of: they are ordered latitude first, longitude second; and more awkwardly, the first column starts at the largest values and counts down, rather than the usual other way around.

    Now, while tutorial programs are meant to illustrate how to code with deal.II, they do not necessarily have to satisfy the same quality standards as one would have to do with production codes. In a production code, we would write a function that reads the data and (i) automatically determines the extents of the first and second column, (ii) automatically determines the number of data points in each direction, (iii) does the interpolation regardless of the order in which data is arranged, if necessary by switching the order between reading and presenting it to the Functions::InterpolatedUniformGridData class.
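To make points (i) and (ii) concrete, such a production-quality reader might look like the following sketch. Everything here (the struct, the function name) is a hypothetical illustration, not part of the tutorial program:

    #include <algorithm>
    #include <fstream>
    #include <limits>
    #include <string>
    #include <vector>

    struct TopographyData
    {
      double lat_min, lat_max, lon_min, lon_max;
      std::vector<double> elevations;
    };

    TopographyData read_topography(const std::string &filename)
    {
      TopographyData data{std::numeric_limits<double>::max(),
                          std::numeric_limits<double>::lowest(),
                          std::numeric_limits<double>::max(),
                          std::numeric_limits<double>::lowest(),
                          {}};

      std::ifstream in(filename);
      double lat, lon, elevation;
      while (in >> lat >> lon >> elevation)
        {
          // Determine the extents of the two coordinate columns on the fly,
          // rather than hard-coding them:
          data.lat_min = std::min(data.lat_min, lat);
          data.lat_max = std::max(data.lat_max, lat);
          data.lon_min = std::min(data.lon_min, lon);
          data.lon_max = std::max(data.lon_max, lon);
          data.elevations.push_back(elevation);
        }
      return data;
    }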

    On the other hand, tutorial programs are best if they are short and demonstrate key points rather than dwell on unimportant aspects and, thereby, obscure what we really want to show. Consequently, we will allow ourselves a bit of leeway:

    All of this then calls for a class that essentially looks like this:

    class AfricaTopography
    {
    public:
      AfricaTopography();

      double value(const double lon, const double lat) const;

    private:
      const Functions::InterpolatedUniformGridData<2> topography_data;

      // ... further static helper functions used during initialization ...
      static std::vector<double> get_data();
    };
Note how the value() function negates the latitude. It also switches from the format $\phi,\theta$ that we use everywhere else to the latitude-longitude format used in the table. Finally, it takes its arguments in radians as that is what we do everywhere else in the program, but then converts them to the degree-based system used for table lookup. As you will see in the implementation below, the function has a few more (static) member functions that we will call in the initialization of the topography_data member variable: the class type of this variable has a constructor that allows us to set everything right at construction time, rather than having to fill data later on, but this constructor takes a number of objects that can't be constructed in-place (at least not in C++98). Consequently, the construction of each of the objects we want to pass in the initialization happens in a number of static member functions.
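In code, the conversion just described could look like this minimal sketch of the value() function (a sketch consistent with the description above, not necessarily identical to the program's implementation):

    double AfricaTopography::value(const double lon, const double lat) const
    {
      // Negate the latitude, switch to latitude-longitude ordering, and
      // convert from radians to the degrees used in the lookup table:
      return topography_data.value(
        Point<2>(-lat * 360 / (2 * numbers::PI),
                 lon * 360 / (2 * numbers::PI)));
    }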

    Having discussed the general outline of how we want to implement things, let us go to the program and show how it is done in practice.

    The commented program

Let us start with the include files we need here. Obviously, we need the ones that describe the triangulation (tria.h), and that allow us to create and output triangulations (grid_generator.h and grid_out.h). Furthermore, we need the header file that declares the Manifold and ChartManifold classes that we will need to describe the geometry (manifold.h). We will then also need the GridTools::transform() function from the last of the following header files; the purpose of this function will be discussed at the point where we use it.


    Describing topography: AfricaTopography

The first significant part of this program is the class that describes the topography $h(\hat\phi,\hat\theta)$ as a function of longitude and latitude. As discussed in the introduction, we will make our life a bit easier here by not writing the class in the most general way possible but by only writing it for the particular purpose we are interested in here: interpolating data obtained from one very specific data file that contains information about a particular area of the world for which we know the extents.

    The general layout of the class has been discussed already above. Following is its declaration, including three static member functions that we will need in initializing the topography_data member variable.

      class AfricaTopography
      {
      public:
        AfricaTopography();

        double value(const double lon, const double lat) const;

      private:
        const Functions::InterpolatedUniformGridData<2> topography_data;

        // ... further static helper functions used during initialization ...
        static std::vector<double> get_data();
      };
Let us move to the implementation of the class. The interesting parts of the class are the constructor and the value() function. The former initializes the Functions::InterpolatedUniformGridData member variable and we will use the constructor that requires us to pass in the end points of the 2-dimensional data set we want to interpolate (which are here given by the intervals $[-6.983333, 11.98333]$, using the trick of switching end points discussed in the introduction, and $[25, 35.983333]$, both given in degrees), the number of intervals into which the data is split (379 in latitude direction and 219 in longitude direction, for a total of $380\times 220$ data points), and a Table object that contains the data. The data then of course has size $380\times 220$ and we initialize it by providing an iterator to the first of the 83,600 elements of a std::vector object returned by the get_data() function below. Note that all of the member functions we call here are static because (i) they do not access any member variables of the class, and (ii) because they are called at a time when the object is not initialized fully anyway.

      AfricaTopography::AfricaTopography()
      : topography_data({{std::make_pair(-6.983333, 11.966667),
      std::make_pair(25, 35.95)}},
                      {{379, 219}},
                      Table<2, double>(380, 220, get_data().begin()))
  {}

The following two functions then define the forward and inverse transformations that correspond to the WGS 84 reference shape of Earth. The forward transform follows the formula shown in the introduction. The inverse transform is significantly more complicated and is, at the very least, not intuitive. It also suffers from the fact that it returns an angle that at the end of the function we need to clip back into the interval $[0,2\pi]$ if it should have escaped from there.

      Point<3> AfricaGeometry::push_forward_wgs84(const Point<3> &phi_theta_d) const
      {
      const double phi = phi_theta_d[0];
      // ... (the remainder of the transformation follows in the full program)

    Creating the mesh

Having so described the properties of the geometry, it is now time to deal with the mesh used to discretize it. To this end, we create objects for the geometry and triangulation, and then proceed to create a $1\times 2\times 1$ rectangular mesh that corresponds to the reference domain $\hat U=[26,35]\times[-10,5]\times[-500000,0]$. We choose this number of subdivisions because it leads to cells that are roughly like cubes instead of stretched in one direction or another.

    Of course, we are not actually interested in meshing the reference domain. We are interested in meshing the real domain. Consequently, we will use the GridTools::transform() function that simply moves every point of a triangulation according to a given transformation. The transformation function it wants is a function that takes as its single argument a point in the reference domain and returns the corresponding location in the domain that we want to map to. This is, of course, exactly the push forward function of the geometry we use. We wrap it by a lambda function to obtain the kind of function object required for the transformation.

  void run()
  {
    // ... (creation and transformation of the coarse mesh elided here)
    for (const auto &cell : triangulation.active_cell_iterators())
      cell->set_all_manifold_ids(0);
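The elided part of run() contains exactly the transformation call just described; a minimal sketch, assuming the AfricaGeometry object created in run() is named geometry:

    GridTools::transform(
      [&geometry](const Point<3> &chart_point) {
        return geometry.push_forward(chart_point);
      },
      triangulation);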
     
The last step is to refine the mesh beyond its initial $1\times 2\times 1$ coarse mesh. We could just refine globally a number of times, but since for the purpose of this tutorial program we're really only interested in what is happening close to the surface, we just refine 6 times all of the cells that have a face at a boundary with indicator 5. Looking this up in the documentation of the GridGenerator::subdivided_hyper_rectangle() function we have used above reveals that boundary indicator 5 corresponds to the top surface of the domain (and this is what the last true argument in the call to GridGenerator::subdivided_hyper_rectangle() above meant: to "color" the boundaries by assigning each boundary a unique boundary indicator).

  for (unsigned int i = 0; i < 6; ++i)
    {
      // Refine all cells with a face at the top boundary (indicator 5):
      for (const auto &cell : triangulation.active_cell_iterators())
        for (const auto &face : cell->face_iterators())
          if (face->boundary_id() == 5)
            cell->set_refine_flag();
      triangulation.execute_coarsening_and_refinement();
    }

    This all begs two questions: first, does it matter, and second, could this be fixed. Let us discuss these in the following:

We are solving for a velocity $\textbf{u}$ and pressure $p$ that satisfy the Stokes equation, which reads

\begin{eqnarray*}
   - \triangle \textbf{u} + \nabla p &=& \textbf{f}, \\
   -\textrm{div}\; \textbf{u} &=& 0.
\end{eqnarray*}

    Optimal preconditioners

    Make sure that you read (even better: try) what is described in "Block Schur complement preconditioner" in the "Possible Extensions" section in step-22. Like described there, we are going to solve the block system using a Krylov method and a block preconditioner.

Our goal here is to construct a very simple (maybe the simplest?) optimal preconditioner for the linear system. A preconditioner is called "optimal" or "of optimal complexity" if the number of iterations of the preconditioned system is independent of the mesh size $h$. You can extend that definition to also require independence of the number of processors used (we will discuss that in the results section), the computational domain and the mesh quality, the test case itself, the polynomial degree of the finite element space, and more.

Why is a constant number of iterations considered to be "optimal"? Assume the discretized PDE gives a linear system with N unknowns. Because the matrix coming from the FEM discretization is sparse, a matrix-vector product can be done in O(N) time. A preconditioner application can also only be O(N) at best (for example doable with multigrid methods). If the number of iterations required to solve the linear system is independent of $h$ (and therefore N), the total cost of solving the system will be O(N). It is not possible to beat this complexity, because even looking at all the entries of the right-hand side already takes O(N) time. For more information see [elman2005], Chapter 2.5 (Multigrid).

The preconditioner described here is even simpler than the one described in step-22 and will typically require more iterations and consequently more time to solve. When considering preconditioners, optimality is not the only important metric. But an optimal and expensive preconditioner is typically more desirable than a cheaper, non-optimal one. This is because, as the mesh size becomes smaller and smaller and linear problems become bigger and bigger, the former will eventually beat the latter.

    The solver and preconditioner

    We precondition the linear system


    where $S=-BA^{-1} B^T$ is the Schur complement.

With this choice of $P$, assuming that we handle $A^{-1}$ and $S^{-1}$ exactly (which is an "idealized" situation), the preconditioned linear system has three distinct eigenvalues independent of $h$ and is therefore "optimal". See section 6.2.1 (especially p. 292) in [elman2005]. For comparison, using the ideal version of the upper block-triangular preconditioner in step-22 (also used in step-56) would have all eigenvalues be equal to one.

We will use approximations of the inverse operations in $P^{-1}$ that are (nearly) independent of $h$. In this situation, one can again show that the eigenvalues are independent of $h$. For the Krylov method we choose MINRES, which is attractive for the analysis (the iteration count is proven to be independent of $h$, see the remainder of Chapter 6.2.1 in [elman2005]), great from the computational standpoint (simpler and cheaper than GMRES, for example), and applicable (matrix and preconditioner are symmetric).

For the approximations we will use a CG solve with the mass matrix in the pressure space for approximating the action of $S^{-1}$. Note that the mass matrix is spectrally equivalent to $S$. We can expect the number of CG iterations to be independent of $h$, even with a simple preconditioner like ILU.

For the approximation of the velocity block $A$ we will perform a single AMG V-cycle. In practice this choice is not exactly independent of $h$, which can explain the slight increase in iteration numbers. A possible explanation is that the coarsest level will be solved exactly and the number of levels and size of the coarsest matrix is not predictable.
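Put together, the preconditioner application could look like the following minimal sketch. The class and member names are illustrative assumptions, not the program's actual interface; the two inner operators stand for the AMG V-cycle on the velocity block and the mass-matrix CG solve just described:

    #include <deal.II/lac/block_vector.h>

    template <class VelocityAMG, class PressureMassSolver>
    class BlockDiagonalPreconditioner
    {
    public:
      BlockDiagonalPreconditioner(const VelocityAMG        &amg,
                                  const PressureMassSolver &mass_solver)
        : amg(amg)
        , mass_solver(mass_solver)
      {}

      void vmult(BlockVector<double> &dst, const BlockVector<double> &src) const
      {
        amg.vmult(dst.block(0), src.block(0));         // one AMG V-cycle ~ A^{-1}
        mass_solver.vmult(dst.block(1), src.block(1)); // CG with M_p ~ S^{-1}
      }

    private:
      const VelocityAMG        &amg;
      const PressureMassSolver &mass_solver;
    };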

    The testcase

    We will construct a manufactured solution based on the classical Kovasznay problem, see [kovasznay1948laminar]. Here is an image of the solution colored by the x velocity including streamlines of the velocity:

We have to cheat here, though, because we are not solving the non-linear Navier-Stokes equations, but the linear Stokes system without convective term. Therefore, to recreate the exact same solution, we use the method of manufactured solutions with the solution of the Kovasznay problem. This will effectively move the convective term into the right-hand side $f$.

    The right-hand side is computed using the script "reference.py" and we use the exact solution for boundary conditions and error computation.

    The commented program

      #include <deal.II/base/quadrature_lib.h>

    Results

As expected from the discussion above, the number of iterations is independent of the number of processors and only very slightly dependent on $h$:

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 2024-01-30 03:04:54.040894646 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 2024-01-30 03:04:54.040894646 +0000 @@ -141,7 +141,7 @@
    Note
    If you use this program as a basis for your own work, please consider citing it in your list of references. The initial version of this work was contributed to the deal.II project by the authors listed in the following citation: 10.5281/zenodo.400995

    Introduction

    Stokes Problem

The purpose of this tutorial is to create an efficient linear solver for the Stokes equation and compare it to alternative approaches. Here, we will use FGMRES with geometric multigrid as a preconditioner for the velocity block, and we will show in the results section that this is a fundamentally better approach than the linear solvers used in step-22 (including the scheme described in "Possible Extensions"). Fundamentally, this is because only with multigrid it is possible to get $O(n)$ solve time, where $n$ is the number of unknowns of the linear system. Using the Timer class, we collect some statistics to compare setup times, solve times, and number of iterations. We also compute errors to make sure that what we have implemented is correct.

Let $u \in H_0^1 = \{ u \in H^1(\Omega), u|_{\partial \Omega} = 0 \}$ and $p \in L_*^2 = \{ p \in L^2(\Omega), \int_\Omega p = 0 \}$. The Stokes equations read as follows in non-dimensionalized form:

\begin{eqnarray*}
   - \triangle \textbf{u} + \nabla p &=& \textbf{f}, \\
   -\textrm{div}\; \textbf{u} &=& 0.
\end{eqnarray*}

Our goal is to compare several solution approaches. While step-22 solves the linear system using a "Schur complement approach" in two separate steps, we instead attack the block system at once using FGMRES with an efficient preconditioner, in the spirit of the approach outlined in the "Results" section of step-22. The idea is as follows: if we find a block preconditioner $P$ such that the matrix

\begin{eqnarray*}
 \left(\begin{array}{cc} A & B^T \\ B & 0 \end{array}\right) P^{-1}
 \quad\cdots\quad
 \left(\begin{array}{cc} I & 0 \\ 0 & \widetilde{S^{-1}} \end{array}\right).
\end{eqnarray*}

Since $P$ is only meant to be a preconditioner, we shall use the approximations on the right in the equation above.

As discussed in step-22, $-M_p^{-1}=:\widetilde{S^{-1}} \approx S^{-1}$, where $M_p$ is the pressure mass matrix and is solved approximately by using CG with ILU as a preconditioner, and $\widetilde{A^{-1}}$ is obtained by one of multiple methods: solving a linear system with CG and ILU as preconditioner, just using one application of an ILU, solving a linear system with CG and GMG (Geometric Multigrid as described in step-16) as a preconditioner, or just performing a single V-cycle of GMG.

As a comparison, we also use the direct solver UMFPACK on the whole system instead of FGMRES. If you want to use a direct solver (like UMFPACK), the system needs to be invertible. To avoid the one-dimensional null space given by the constant pressures, we fix the first pressure unknown to zero. This is not necessary for the iterative solvers.
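One common way to pin a single degree of freedom in deal.II is through an AffineConstraints object; a minimal sketch, where first_pressure_dof is a hypothetical variable holding the index of the first pressure unknown:

    AffineConstraints<double> constraints;
    constraints.add_line(first_pressure_dof); // constrain this DoF to zero
    constraints.close();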

    Reference Solution

The test problem is a "Manufactured Solution" (see step-7 for details), and we choose $u=(u_1,u_2,u_3)=(2\sin (\pi x), - \pi y \cos (\pi x), - \pi z \cos (\pi x))$.
Implementation of $f$. See the introduction for more information.

  template <int dim>
  class RightHandSide : public Function<dim>
  {
    // ... (member declarations follow in the full program)
  };
      template <int dim>
      void StokesProblem<dim>::compute_errors()
      {
Compute the mean pressure $\frac{1}{|\Omega|} \int_{\Omega} p(x)\, dx$ and then subtract it from each pressure coefficient. This will result in a pressure with mean value zero. Here we make use of the fact that the pressure is component $dim$ and that the finite element space is nodal.

      const double mean_pressure = VectorTools::compute_mean_value(
      dof_handler, QGauss<dim>(pressure_degree + 2), solution, dim);
      solution.block(1).add(-mean_pressure);
      }

    Results

    Errors

We first run the code and confirm that the finite element solution converges with the correct rates as predicted by the error analysis of mixed finite element problems. Given sufficiently smooth exact solutions $u$ and $p$, the errors of the Taylor-Hood element $Q_k \times Q_{k-1}$ should be

    \[
 \| u -u_h \|_0 + h ( \| u- u_h\|_1 + \|p - p_h \|_0)
 \leq C h^{k+1} ( \|u \|_{k+1} + \| p \|_k )
 \]

see for example Ern/Guermond "Theory and Practice of Finite Elements", Section 4.2.5, p. 195. This is indeed what we observe, using the $Q_2 \times Q_1$ element as an example (this is what is done in the code, but is easily changed in main()):


    As can be seen from the table:

1. UMFPACK uses large amounts of memory, especially in 3d. Also, UMFPACK timings do not scale favorably with problem size.
2. Because we are using inner solvers for $A$ and $S$, ILU and GMG require the same number of outer iterations.
3. The number of (inner) iterations for $A$ increases for ILU with refinement, leading to worse than linear scaling in solve time. In contrast, the number of inner iterations for $A$ stays constant with GMG, leading to nearly perfect scaling in solve time.
4. GMG needs slightly more memory than ILU to store the level and interface matrices.

The introduction also outlined another option to precondition the overall system, namely one in which we do not choose $\widetilde{A^{-1}}=A^{-1}$ as in the table above, but in which $\widetilde{A^{-1}}$ is only a single preconditioner application with GMG or ILU, respectively.

    This is in fact implemented in the code: Currently, the boolean use_expensive in solve() is set to true. The option mentioned above is obtained by setting it to false.

What you will find is that the number of FGMRES iterations stays constant under refinement if you use GMG this way. This means that the multigrid preconditioner is optimal and independent of $h$.

    The plain program

    /* ---------------------------------------------------------------------
    *
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 2024-01-30 03:04:54.100895146 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 2024-01-30 03:04:54.100895146 +0000 @@ -147,7 +147,7 @@

    Introduction

    Navier Stokes Equations

In this tutorial we show how to solve the incompressible Navier Stokes equations (NSE) with Newton's method. The flow we consider here is assumed to be steady. In a domain $\Omega \subset \mathbb{R}^{d}$, $d=2,3$, with a piecewise smooth boundary $\partial \Omega$, and a given force field $\textbf{f}$, we seek a velocity field $\textbf{u}$ and a pressure field $\textbf{p}$ satisfying

\begin{eqnarray*}
 - \nu \Delta\textbf{u} + (\textbf{u} \cdot \nabla)\textbf{u} + \nabla p &=& \textbf{f}\\
 - \nabla \cdot \textbf{u} &=& 0.
\end{eqnarray*}

\begin{eqnarray*}
  \textbf{x}^{k+1} = \textbf{x}^{k} - (\nabla F(\textbf{x}^{k}))^{-1} F(\textbf{x}^{k}),
\end{eqnarray*}

where $\textbf{x}^{k+1}$ is the approximate solution in step $k+1$, $\textbf{x}^{k}$ represents the solution from the previous step, and $\nabla F(\textbf{x}^{k})$ is the Jacobian matrix evaluated at $\textbf{x}^{k}$. A similar iteration can be found in step-15.

    The Newton iteration formula implies the new solution is obtained by adding an update term to the old solution. Instead of evaluating the Jacobian matrix and taking its inverse, we consider the update term as a whole, that is

\begin{eqnarray*}
  \delta \textbf{x}^{k} = \textbf{x}^{k+1} - \textbf{x}^{k}
  = -(\nabla F(\textbf{x}^{k}))^{-1} F(\textbf{x}^{k}).
\end{eqnarray*}

Now, Newton's iteration can be used to solve for the update terms:

1. Initialization: Initial guess $u_0$ and $p_0$, tolerance $\tau$;
2. Linear solve to compute update term $\delta\textbf{u}^{k}$ and $\delta p^k$;

    Finding an Initial Guess

    The initial guess needs to be close enough to the solution for Newton's method to converge; hence, finding a good starting value is crucial to the nonlinear solver.

When the viscosity $\nu$ is large, a good initial guess can be obtained by solving the Stokes equation with viscosity $\nu$. While problem dependent, this works for $\nu \geq 1/400$ for the test problem considered here.

However, the convective term $(\mathbf{u}\cdot\nabla)\mathbf{u}$ will be dominant if the viscosity is small, like $1/7500$ in test case 2. In this situation, we use a continuation method to set up a series of auxiliary NSEs with viscosity approaching the one in the target NSE. Correspondingly, we create a sequence $\{\nu_{i}\}$ with $\nu_{n}= \nu$, and accept that the solutions to two NSE with viscosity $\nu_{i}$ and $\nu_{i+1}$ are close if $|\nu_{i} - \nu_{i+1}|$ is small. Then we use the solution to the NSE with viscosity $\nu_{i}$ as the initial guess of the NSE with $\nu_{i+1}$. This can be thought of as a staircase from the Stokes equations to the NSE we want to solve.
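In pseudo-code form, the continuation loop just described might look as follows; the helper functions and the step factor are assumptions for illustration, not the program's actual structure:

    double       nu        = 1.0;          // start viscous, Stokes-like
    const double nu_target = 1.0 / 7500.0; // viscosity of the target NSE

    solve_stokes(); // initial guess from the Stokes equations

    while (nu > nu_target)
      {
        nu = std::max(nu / 10.0, nu_target); // next step down the staircase
        solve_navier_stokes(nu); // Newton, warm-started by the last solution
      }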

    That is, we first solve a Stokes problem


with a parameter $\gamma$ and an invertible matrix $W$. Here $\gamma B^TW^{-1}B$ is the Augmented Lagrangian term; see [Benzi2006] for details.

Denoting the system matrix of the new system by $G$ and the right-hand side by $b$, we solve it iteratively with right preconditioning $P^{-1}$ as $GP^{-1}y = b$, where

\begin{eqnarray*}
 P^{-1} =
   \begin{pmatrix}
     \tilde{A} & B^T \\
     0         & \tilde{S}
   \end{pmatrix}^{-1}
\end{eqnarray*}

with $\tilde{A} = A + \gamma B^TW^{-1}B$ and $\tilde{S}$ the corresponding Schur complement $\tilde{S} = B \tilde{A}^{-1} B^T$. We let $W = M_p$ where $M_p$ is the pressure mass matrix, then $\tilde{S}^{-1}$ can be approximated by

    \begin{eqnarray*}
 \tilde{S}^{-1} \approx -(\nu+\gamma)M_p^{-1}.
 \end{eqnarray*}

    See [Benzi2006] for details.

We decompose $P^{-1}$ as

\begin{eqnarray*}
 P^{-1} =
   \begin{pmatrix}
     \tilde{A}^{-1} & 0 \\
     0              & I
   \end{pmatrix}
   \begin{pmatrix}
     I & -B^T \\
     0 & I
   \end{pmatrix}
   \begin{pmatrix}
     I & 0 \\
     0 & \tilde{S}^{-1}
   \end{pmatrix}.
\end{eqnarray*}

Here two inexact solvers will be needed for $\tilde{A}^{-1}$ and $\tilde{S}^{-1}$, respectively (see [Benzi2006]). Since the pressure mass matrix is symmetric and positive definite, CG with ILU as a preconditioner is appropriate to use for $\tilde{S}^{-1}$. For simplicity, we use the direct solver UMFPACK for $\tilde{A}^{-1}$. The last ingredient is a sparse matrix-vector product with $B^T$. Instead of computing the matrix product in the augmented Lagrangian term in $\tilde{A}$, we assemble Grad-Div stabilization $(\nabla \cdot \phi _{i}, \nabla \cdot \phi _{j}) \approx (B^T M_p^{-1}B)_{ij}$, as explained in [HeisterRapin2013].
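In assembly, this means the local matrix simply gains an extra $\gamma\,(\nabla\cdot\phi_i)(\nabla\cdot\phi_j)$ contribution. A minimal sketch of the relevant line inside a standard FEValues quadrature loop, with the Newton linearization and the other Stokes terms omitted; all names (local_matrix, gamma, div_phi_u, ...) are generic placeholders, not the program's actual variables:

    // Inside the loops over quadrature point q and local indices i, j:
    local_matrix(i, j) += gamma               // augmentation parameter
                          * div_phi_u[i]      // div(phi_i)
                          * div_phi_u[j]      // div(phi_j)
                          * fe_values.JxW(q); // quadrature weight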

    Test Case

    We use the lid driven cavity flow as our test case; see this page for details. The computational domain is the unit square and the right-hand side is $f=0$. The boundary condition is


    When solving this problem, the error consists of the nonlinear error (from Newton's iteration) and the discretization error (dependent on mesh size). The nonlinear part decreases with each Newton iteration and the discretization error reduces with mesh refinement. In this example, the solution from the coarse mesh is transferred to successively finer meshes and used as an initial guess. Therefore, the nonlinear error is always brought below the tolerance of Newton's iteration and the discretization error is reduced with each mesh refinement.

Inside the loop, we involve three solvers: one for $\tilde{A}^{-1}$, one for $M_p^{-1}$ and one for $Gx=b$. The first two solvers are invoked in the preconditioner and the outer solver gives us the update term. Overall convergence is controlled by the nonlinear residual; as Newton's method does not require an exact Jacobian, we employ FGMRES with a relative tolerance of only 1e-4 for the outer linear solver. In fact, we use the truncated Newton solve for this system. As described in step-22, the inner linear solves are also not required to be done very accurately. Here we use CG with a relative tolerance of 1e-6 for the pressure mass matrix. As expected, we still see convergence of the nonlinear residual down to 1e-14. Also, we use a simple line search algorithm for globalization of the Newton method.

The cavity reference values for $\mathrm{Re}=400$ and $\mathrm{Re}=7500$ are from [Ghia1982] and [Erturk2005], respectively, where $\mathrm{Re}$ is the Reynolds number. Here the viscosity is defined by $1/\mathrm{Re}$. Even though we can still find a solution for $\mathrm{Re}=10000$ and the papers cited throughout this introduction contain results for comparison, we limit our discussion here to $\mathrm{Re}=7500$. This is because the solution is no longer stationary starting around $\mathrm{Re}=8000$ but instead becomes periodic, see [Bruneau2006] for details.

    The commented program

    Include files

    As usual, we start by including some well-known files:

• If we were asked to assemble the Newton matrix, then we also built a pressure mass matrix in the bottom right block of the matrix. We only need this for the preconditioner, so we need to copy it into a separate matrix object, followed by zeroing out this block in the Newton matrix.

Note that setting this bottom right block to zero is not identical to not assembling anything in this block, because applying boundary values and hanging node constraints (in the constraints_used.distribute_local_to_global() call above) puts entries into this block. As a consequence, setting the $(1,1)$ block to zero below does not result in what would have happened if we had just not assembled a pressure mass matrix in that block to begin with.

The difference is that if we had not assembled anything in this block, dealing with constrained degrees of freedom would have put entries on the diagonal of the $(1,1)$ block whereas the last operation below, zeroing out the entire block, results in a system matrix with rows and columns that are completely empty. In other words, the linear problem is singular. Luckily, however, the FGMRES solver we use appears to handle these rows and columns without any problem.

  if (assemble_matrix)
    {
      pressure_mass_matrix.reinit(sparsity_pattern.block(1, 1));
      pressure_mass_matrix.copy_from(system_matrix.block(1, 1));
      system_matrix.block(1, 1) = 0;
    }
/usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 2024-01-30 03:04:54.160895645 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 2024-01-30 03:04:54.160895645 +0000

... the term $\kappa|\psi|^2\psi$ has no spatial or temporal derivatives, i.e., it is a purely local operator. It turns out that we have efficient methods for each of these terms (in particular, we have analytic solutions for the latter), and that we may be better off treating these terms differently and separately. We will explain this in more detail below.

    A note about the character of the equations

At first glance, the equations appear to be parabolic and similar to the heat equation (see step-26) as there is only a single time derivative and two spatial derivatives. But this is misleading. Indeed, that this is not the correct interpretation is more easily seen if we assume for a moment that the potential $V=0$ and $\kappa=0$. Then we have the equation

\begin{align*}
  - i \frac{\partial \psi}{\partial t}
  - \frac 12 \Delta \psi
  &= 0.
\end{align*}

Not surprisingly, the factor $i$ in front of the time derivative couples the real and imaginary parts of the equation. If we want to understand this equation further, take the time derivative of one of the equations, say

\begin{align*}
  \frac{\partial^2 w}{\partial t^2}
  - \frac 12 \Delta \frac{\partial v}{\partial t}
  &= 0.
\end{align*}

This equation is hyperbolic and similar in character to the wave equation. (This will also be obvious if you look at the video in the "Results" section of this program.) Furthermore, we could have arrived at the same equation for $v$ as well. Consequently, a better assumption for the NLSE is to think of it as a hyperbolic, wave-propagation equation than as a diffusion equation such as the heat equation. (You may wonder whether it is correct that the operator $\Delta^2$ appears with a positive sign whereas in the wave equation, $\Delta$ has a negative sign. This is indeed correct: After multiplying by a test function and integrating by parts, we want to come out with a positive (semi-)definite form. So, from $-\Delta u$ we obtain $+(\nabla v,\nabla u)$. Likewise, after integrating by parts twice, we obtain from $+\Delta^2 u$ the form $+(\Delta v,\Delta u)$. In both cases we get the desired positive sign.)

    The real NLSE, of course, also has the terms $V\psi$ and $\kappa|\psi|^2\psi$. However, these are of lower order in the spatial derivatives, and while they are obviously important, they do not change the character of the equation.

In any case, the purpose of this discussion is to figure out what time stepping scheme might be appropriate for the equation. The conclusion is that, as a hyperbolic-kind of equation, we need to choose a time step that satisfies a CFL-type condition. If we were to use an explicit method (which we will not), we would have to investigate the eigenvalues of the matrix that corresponds to the spatial operator. If you followed the discussions of the video lectures (See also video lecture 26, video lecture 27, video lecture 28.) then you will remember that the pattern is that one needs to make sure that $k^s \propto h^t$ where $k$ is the time step, $h$ the mesh width, and $s,t$ are the orders of temporal and spatial derivatives. Whether you take the original equation ( $s=1,t=2$) or the reformulation for only the real or imaginary part, the outcome is that we would need to choose $k \propto h^2$ if we were to use an explicit time stepping method. This is not feasible for the same reasons as in step-26 for the heat equation: It would yield impractically small time steps for even only modestly refined meshes. Rather, we have to use an implicit time stepping method and can then choose a more balanced $k \propto h$. Indeed, we will use the implicit Crank-Nicolson method as we have already done in step-23 before for the regular wave equation.
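In code, the $k \propto h$ choice could be as simple as the following sketch; the proportionality constant is an assumption for illustration, not a value taken from the program:

    const double h  = GridTools::minimal_cell_diameter(triangulation);
    const double dt = 0.5 * h; // k proportional to h, feasible with an implicit scheme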

    The general idea of operator splitting

    Note
    The material presented here is also discussed in video lecture 30.25. (All video lectures are also available here.)

    If one thought of the NLSE as an ordinary differential equation in which the right hand side happens to have spatial derivatives, i.e., write it as


This intuition is indeed correct, though the approximation is not exact: the difference between the exact left hand side and the term $I^{(1)}+I^{(2)}+I^{(3)}$ (i.e., the difference between the exact increment for the exact solution $\psi(t)$ when moving from $t_n$ to $t_{n+1}$, and the increment composed of the three parts on the right hand side), is proportional to $\Delta t=t_{n+1}-t_{n}$. In other words, this approach introduces an error of size ${\cal O}(\Delta t)$. Nothing we have done so far has discretized anything in time or space, so the overall error is going to be ${\cal O}(\Delta t)$ plus whatever error we commit when approximating the integrals (the temporal discretization error) plus whatever error we commit when approximating the spatial dependencies of $\psi$ (the spatial error).

    Before we continue with discussions about operator splitting, let us talk about why one would even want to go this way. The answer is simple: For some of the separate equations for the $\psi^{(k)}$, we may have ways to solve them more efficiently than if we throw everything together and try to solve it at once. For example, and particularly pertinent in the current case: The equation for $\psi^{(3)}$, i.e.,

    \begin{align*}
   \frac{d\psi^{(3)}}{dt}
   =
   -i\kappa |\psi^{(3)}|^2\,\psi^{(3)},
   \qquad\qquad
   \psi^{(3)}(t_n)=\psi(t_n),
 \end{align*}

    can be solved exactly: its solution is

    \begin{align*}
   \psi^{(3)}(t) = \psi(t_n) e^{-i\kappa|\psi(t_n)|^2 (t-t_n)}.
 \end{align*}

    This is easy to see if (i) you plug this solution into the differential equation, and (ii) realize that the magnitude $|\psi^{(3)}|$ is constant, i.e., the term $|\psi(t_n)|^2$ in the exponent is in fact equal to $|\psi^{(3)}(t)|^2$. In other words, the solution of the ODE for $\psi^{(3)}(t)$ only changes its phase, but the magnitude of the complex-valued function $\psi^{(3)}(t)$ remains constant. This makes computing $I^{(3)}$ particularly convenient: we don't actually need to solve any ODE; we can write the solution down by hand. Using the operator splitting approach, none of the methods used to compute $I^{(1)}$ and $I^{(2)}$ therefore has to deal with the nonlinear term and all of the associated unpleasantries: we can get away with solving only linear problems, as long as we allow ourselves the luxury of using an operator splitting approach.
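    To make this concrete, the following sketch (not the tutorial's verbatim code; the names solution, kappa, and dt are assumptions) applies the closed-form phase rotation to each entry of a complex-valued solution vector:

      // Apply psi <- psi * exp(-i * kappa * |psi|^2 * dt) entry by entry;
      // no linear system needs to be solved for this sub-step.
      for (std::complex<double> &value : solution)
        {
          const double magnitude_squared = std::norm(value); // |psi|^2
          value *= std::exp(std::complex<double>(0., -kappa * magnitude_squared * dt));
        }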

    Secondly, one often uses operator splitting if the different physical effects described by the different terms have different time scales. Imagine, for example, a case where we really did have some sort of diffusion equation. Diffusion acts slowly, but if $\kappa$ is large, then the "phase rotation" by the term $-i\kappa |\psi^{(3)}(t)|^2 \,\psi^{(3)}(t)$ acts quickly. If we treated everything together, this would imply having to take rather small time steps. But with operator splitting, we can take large time steps $\Delta t=t_{n+1}-t_{n}$ for the diffusion, and (assuming we didn't have an analytic solution) use an ODE solver with many small time steps to integrate the "phase rotation" equation for $\psi^{(3)}$ from $t_n$ to $t_{n+1}$. In other words, operator splitting allows us to decouple slow and fast time scales and treat them differently, with methods adjusted to each case.

    Operator splitting: the "Lie splitting" approach

    While the method above allows us to compute the three contributions $I^{(k)}$ in parallel if we want, the method can be made slightly more accurate and easier to implement if we don't let the trajectories for the $\psi^{(k)}$ all start at $\psi(t_n)$, but instead let the trajectory for $\psi^{(2)}$ start at the end point of the trajectory for $\psi^{(1)}$, namely $\psi^{(1)}(t_{n+1})$; similarly, we will let the trajectory for $\psi^{(3)}$ start at the end point of the trajectory for $\psi^{(2)}$, namely $\psi^{(2)}(t_{n+1})$. This method is then called "Lie splitting" and has the same order of error as the method above, i.e., the splitting error is ${\cal O}(\Delta t)$.


    (Compare this again with the "exact" computation of $\psi(t_{n+1})$: It only differs in how we approximate $\psi(t)$ in each of the three integrals.) In other words, Lie splitting is a lot easier to implement than the original method outlined above because the data handling is so much simpler.
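    In code, one Lie splitting step could then be sketched as follows, where the three functions are hypothetical names for the sub-step solvers and each updates the same solution vector in place:

      advance_laplace_part(t_n, t_n1);   // compute psi^(1), starting from psi(t_n)
      advance_potential_part(t_n, t_n1); // compute psi^(2), starting from psi^(1)(t_{n+1})
      advance_phase_part(t_n, t_n1);     // compute psi^(3), starting from psi^(2)(t_{n+1})
      // psi(t_{n+1}) := psi^(3)(t_{n+1}) is the initial value for the next step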

    Operator splitting: the "Strang splitting" approach

    As mentioned above, Lie splitting is only ${\cal O}(\Delta t)$ accurate. This is acceptable if we were to use a first order time discretization, for example the explicit or implicit Euler methods, to solve the differential equations for the $\psi^{(k)}$. This is because these time integration methods introduce an error proportional to $\Delta t$ themselves, and so the splitting error is proportional to an error that we would introduce anyway, and does not diminish the overall convergence order.

    But we typically want to use something higher order – say, a Crank-Nicolson or BDF2 method – since these are often not more expensive than a simple Euler method. It would be a shame if we were to use a time stepping method that is ${\cal O}(\Delta t^2)$, but then lose the accuracy again through the operator splitting.

    This is where the Strang splitting method comes in. It is easier to explain if we had only two parts, and so let us combine the effects of the Laplace operator and of the potential into one, and the phase rotation into a second effect. (Indeed, this is what we will do in the code since solving the equation with the Laplace operator with or without the potential costs the same – so we merge these two steps.) The Lie splitting method from above would then do the following: It computes solutions of two separate ODEs, the phase rotation equation as before and, for the combined spatial terms,

    \begin{align*}
   -i\frac{\partial \psi^{(2)}}{\partial t}
   -
   \frac 12 \Delta \psi^{(2)} + V \psi^{(2)} = 0.
 \end{align*}

    This equation is linear. Furthermore, we only have to solve it from $t_n$ to $t_{n+1}$, i.e., for exactly one time step.

    To do this, we will apply the second order accurate Crank-Nicolson scheme that we have already used in some of the other time dependent codes (specifically: step-23 and step-26). It reads as follows:

    \begin{align*}
   -i\frac{\psi^{(n,2)}-\psi^{(n,1)}}{k_{n+1}}
   -
   \frac 12 \Delta \left[\frac 12 \left(\psi^{(n,2)}+\psi^{(n,1)}\right)\right]
   +
   V \left[\frac 12 \left(\psi^{(n,2)}+\psi^{(n,1)}\right)\right]
   = 0.
 \end{align*}

    Here, the "previous" solution $\psi^{(n,1)}$ (or the "initial condition" for this part of the time step) is the output of the first phase rotation half-step; the output of the current step will be denoted by $\psi^{(n,2)}$. $k_{n+1}=t_{n+1}-t_n$ is the length of the time step. (One could argue whether $\psi^{(n,1)}$ and $\psi^{(n,2)}$ live at time step $n$ or $n+1$ and what their upper indices should be. This is a philosophical discussion without practical impact, and one might think of $\psi^{(n,1)}$ as something like $\psi^{(n+\tfrac 13)}$, and $\psi^{(n,2)}$ as $\psi^{(n+\tfrac 23)}$ if that helps clarify things – though, again, $n+\frac 13$ is not to be understood as "one third time step after $t_n$" but more like "we've already done one third of the work necessary for time step $n+1$".)

    If we multiply the whole equation with $k_{n+1}$ and sort terms with the unknown $\psi^{(n,2)}$ to the left and those with the known $\psi^{(n,1)}$ to the right, then we obtain the following (spatial) partial differential equation that needs to be solved in each time step:

    \begin{align*}
   -i\psi^{(n,2)}
   -
   \frac{k_{n+1}}{4} \Delta \psi^{(n,2)}
   +
   \frac{k_{n+1}}{2} V \psi^{(n,2)}
   =
   -i\psi^{(n,1)}
   +
   \frac{k_{n+1}}{4} \Delta \psi^{(n,1)}
   -
   \frac{k_{n+1}}{2} V \psi^{(n,1)}.
 \end{align*}
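    Putting the pieces together, one time step of the Strang splitting used here can be sketched as follows; do_full_spatial_step() appears in the program below, while do_half_phase_step() and the surrounding loop are assumed names for illustration:

      for (unsigned int step = 0; step < n_time_steps; ++step)
        {
          do_half_phase_step();   // phase rotation over k/2, done analytically
          do_full_spatial_step(); // Laplace + potential over k, via Crank-Nicolson
          do_half_phase_step();   // phase rotation over the remaining k/2
        }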

    Spatial discretization and dealing with complex variables

    As mentioned above, the previous tutorial program dealing with complex-valued solutions (namely, step-29) separated real and imaginary parts of the solution. It thus reduced everything to real arithmetic. In contrast, we here want to keep things complex-valued.

    The first part of this is that we need to define the discretized solution as $\psi_h^n(\mathbf x)=\sum_j \Psi^n_j \varphi_j(\mathbf x) \approx \psi(\mathbf x,t_n)$ where the $\varphi_j$ are the usual shape functions (which are real valued) but the expansion coefficients $\Psi^n_j$ at time step $n$ are now complex-valued. This is easily done in deal.II: We just have to use Vector<std::complex<double>> instead of Vector<double> to store these coefficients.
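    In terms of member variables, this might simply look as follows (a sketch; the variable names are assumptions):

      // Complex-valued coefficient vectors for the current solution and the
      // right hand side of the linear system:
      Vector<std::complex<double>> solution;
      Vector<std::complex<double>> system_rhs;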

    Of more interest is how to build and solve the linear system. Obviously, this will only be necessary for the second step of the Strang splitting discussed above, with the time discretization of the previous subsection. We obtain the fully discrete version through straightforward substitution of $\psi^n$ by $\psi^n_h$ and multiplication by a test function:

    \begin{align*}
   -iM\Psi^{(n,2)}
   +
   \frac{k_{n+1}}{4} A \Psi^{(n,2)}
   +
   \frac{k_{n+1}}{2} W \Psi^{(n,2)}
   =
   -iM\Psi^{(n,1)}
   -
   \frac{k_{n+1}}{4} A \Psi^{(n,1)}
   -
   \frac{k_{n+1}}{2} W \Psi^{(n,1)},
 \end{align*}

    where $M$ is the mass matrix, $A$ the stiffness matrix that results from integrating the Laplace term by parts (using the Neumann boundary conditions below), and $W$ the matrix corresponding to multiplication by the potential $V$. Abbreviating the matrix on the left as $C$ and the one on the right as $R$, each time step thus requires solving the linear system $C\Psi^{(n,2)} = R\Psi^{(n,1)}$.

    For the initial conditions, we will use a sum of Gaussians centered at points $\mathbf x_k$, with coefficients $\alpha_k$ chosen so that each contribution's integral

    \[
   \int_\Omega \alpha_k e^{-\frac{r_k^2}{R^2}}
 \]

    is a positive integer. In other words, we need to choose $\alpha_k$ as an integer multiple of

    \[
   \left(\int_\Omega e^{-\frac{r_k^2}{R^2}}\right)^{-1}
   =
   \left(R^d\sqrt{\pi^d}\right)^{-1},
 \]

    assuming for the moment that $\Omega={\mathbb R}^d$ – which is of course not the case, but we'll ignore the small difference in integral.

    Thus, we choose $\alpha_k=\left(R^d\sqrt{\pi^d}\right)^{-1}$ for all $k$, and $R=0.1$. This $R$ is small enough that the difference between the exact (infinite) integral and the integral over $\Omega$ should not be too concerning. We choose the four points $\mathbf x_k$ as $(\pm 0.3, 0), (0, \pm 0.3)$ – also far enough away from the boundary of $\Omega$ to keep ourselves on the safe side.

    For simplicity, we pose the problem on the square $[-1,1]^2$. For boundary conditions, we will use time-independent Neumann conditions of the form

    \[
   \nabla\psi(\mathbf x,t)\cdot \mathbf n=0 \qquad\qquad \forall \mathbf x\in\partial\Omega.
 \]

    This is not a realistic choice of boundary conditions but sufficient for what we want to demonstrate here. We will comment further on this in the Possibilities for extensions section below.

    Finally, we choose $\kappa=1$, and the potential as

    \[
   V(\mathbf x)
   =
   \begin{cases}
     0 & \text{if}\; |\mathbf x| < 0.7,
     \\
     1000 & \text{otherwise}.
   \end{cases}
 \]

    Using a large potential makes sure that the wave function $\psi$ remains small outside the circle of radius 0.7. All of the Gaussians that make up the initial conditions are within this circle, and the solution will mostly oscillate within it, with a small amount of energy radiating into the outside. The use of a large potential also makes sure that the nonphysical boundary condition does not have too large an effect.

    The commented program

    Include files

    The program starts with the usual include files, all of which you should have seen before by now:


    Implementation of the NonlinearSchroedingerEquation class

    We start by specifying the implementation of the constructor of the class. There is nothing surprising to see here except perhaps that we choose quadratic ($Q_2$) Lagrange elements – the solution is expected to be smooth, so we choose a higher polynomial degree than the bare minimum.

      template <int dim>
      NonlinearSchroedingerEquation<dim>::NonlinearSchroedingerEquation()
      : fe(2)
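      // (Continuation sketched for readability; the remaining member
      // initializers and their values are assumptions, not the tutorial's
      // exact code.)
      , dof_handler(triangulation)
      , time(0)
      , kappa(1)
      {}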
    The next step is to solve the linear system in each time step, i.e., the second half step of the Strang splitting we use. Recall that it had the form $C\Psi^{(n,2)} = R\Psi^{(n,1)}$ where $C$ and $R$ are the matrices we assembled earlier.

    The way we solve this here is using a direct solver. We first form the right hand side $r=R\Psi^{(n,1)}$ using the SparseMatrix::vmult() function and put the result into the system_rhs variable. We then call SparseDirectUMFPACK::solve() which takes as argument the matrix $C$ and the right hand side vector and returns the solution in the same vector system_rhs. The final step is then to put the solution so computed back into the solution variable.

      template <int dim>
      void NonlinearSchroedingerEquation<dim>::do_full_spatial_step()
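      {
        // Body sketched from the description above (not copied verbatim):
        // form r = R Psi^{(n,1)}, solve C x = r with a sparse direct solver,
        // and store the result as the new solution.
        rhs_matrix.vmult(system_rhs, solution);         // r = R Psi^{(n,1)}
        SparseDirectUMFPACK direct_solver;
        direct_solver.solve(system_matrix, system_rhs); // system_rhs <- C^{-1} r
        solution = system_rhs;                          // this is Psi^{(n,2)}
      }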

    So why did I end up shading the area where the potential $V(\mathbf x)$ is large? In that outside region, the solution is relatively small. It is also relatively smooth. As a consequence, to some approximate degree, the equation in that region simplifies to

    \[
   - i \frac{\partial \psi}{\partial t}
   + V \psi
   \approx 0.
 \]

    Better linear solvers

    The solver chosen here is just too simple. It is also not efficient. What we do here is give the matrix to a sparse direct solver in every time step and let it find the solution of the linear system. But we know that we could do far better.

    Boundary conditions

    In order to be usable for actual, realistic problems, solvers for the nonlinear Schrödinger equation need to utilize boundary conditions that make sense for the problem at hand. We have here restricted ourselves to simple Neumann boundary conditions – but these do not actually make sense for the problem. Indeed, the equations are generally posed on an infinite domain. But, since we can't compute on infinite domains, we need to truncate it somewhere and instead pose boundary conditions that make sense for this artificially small domain. The widely used approach is the Perfectly Matched Layer method, which corresponds to a particular kind of attenuation. It is, in a different context, also used in step-62.

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    This work was partly supported by the German Research Foundation (DFG) through the project "High-order discontinuous Galerkin for the exa-scale" (ExaDG) within the priority program "Software for Exascale Computing" (SPPEXA).

    Introduction

    Matrix-free operator evaluation enables very efficient implementations of discretizations with high-order polynomial bases due to a method called sum factorization. This concept has been introduced in the step-37 and step-48 tutorial programs. In this tutorial program, we extend those concepts to discontinuous Galerkin (DG) schemes that include face integrals, a class of methods where high orders are particularly widespread.

    The underlying idea of the matrix-free evaluation is the same as for continuous elements: The matrix-vector product that appears in an iterative solver or multigrid smoother is not implemented by a classical sparse matrix kernel, but instead applied implicitly by the evaluation of the underlying integrals on the fly. For tensor product shape functions that are integrated with a tensor product quadrature rule, this evaluation is particularly efficient by using the sum-factorization technique, which decomposes the initially $(k+1)^{2d}$ operations for interpolation involving $(k+1)^d$ vector entries with associated shape functions at degree $k$ in $d$ dimensions to $(k+1)^d$ quadrature points into $d$ one-dimensional operations of cost $(k+1)^{d+1}$ each. In 3D, this reduces the order of complexity by two powers in $k$. When measured as the complexity per degree of freedom, the complexity is $\mathcal O(k)$ in the polynomial degree. Due to the presence of face integrals in DG, and due to the fact that operations on quadrature points involve more memory transfer, which both scale as $\mathcal O(1)$, the observed complexity is often constant for moderate $k\leq 10$. This means that a high order method can be evaluated with the same throughput in terms of degrees of freedom per second as a low-order method.
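    To make the cost argument concrete, the following self-contained sketch (an illustration using std::vector, not deal.II library code) interpolates the $(k+1)^2$ coefficients of a 2D tensor-product polynomial to the $(k+1)^2$ tensor-product quadrature points in two one-dimensional passes of cost $(k+1)^3$ each, instead of a single pass of cost $(k+1)^4$:

      // S[i][q] is the value of the i-th 1d shape function at 1d quadrature
      // point q; u[j][i] are the 2d coefficients; the result uq[q2][q1] holds
      // the interpolated values at the quadrature points.
      std::vector<std::vector<double>>
      interpolate_2d(const std::vector<std::vector<double>> &S,
                     const std::vector<std::vector<double>> &u)
      {
        const unsigned int n = S.size(); // n = k+1 per direction
        std::vector<std::vector<double>> tmp(n, std::vector<double>(n, 0.));
        std::vector<std::vector<double>> uq(n, std::vector<double>(n, 0.));
        for (unsigned int j = 0; j < n; ++j)    // first pass: x-direction
          for (unsigned int q = 0; q < n; ++q)
            for (unsigned int i = 0; i < n; ++i)
              tmp[j][q] += S[i][q] * u[j][i];
        for (unsigned int q2 = 0; q2 < n; ++q2) // second pass: y-direction
          for (unsigned int q1 = 0; q1 < n; ++q1)
            for (unsigned int j = 0; j < n; ++j)
              uq[q2][q1] += S[j][q2] * tmp[j][q1];
        return uq; // total cost: 2 n^3 operations instead of n^4
      }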

    More information on the algorithms is available in the preprint
    Fast matrix-free evaluation of discontinuous Galerkin finite element operators by Martin Kronbichler and Katharina Kormann, arXiv:1711.03590.

    The symmetric interior penalty formulation for the Laplacian

    The symmetric interior penalty discretization of the Laplacian asks to find $u_h$ such that, for all discrete test functions $v_h$,

    \begin{align*}
   &\sum_{K \in \text{cells}} \left(\nabla v_h, \nabla u_h\right)_{K} \\
   &\quad + \sum_{F \in \text{faces}} \Big(
     -\left<\jump{v_h}, \average{\nabla u_h}\right>_{F}
     -\left<\average{\nabla v_h}, \jump{u_h}\right>_{F}
     +\left<\jump{v_h}, \sigma \jump{u_h}\right>_{F}
   \Big) \\
   &= \sum_{K \in \text{cells}} \left(v_h, f\right)_{K},
 \end{align*}

    where $\jump{v} = v^- \mathbf{n}^- + v^+ \mathbf{n}^+ = \mathbf n^{-} \left(v^- - v^+\right)$ denotes the directed jump of the quantity $v$ from the two associated cells $K^-$ and $K^+$, and $\average{v}=\frac{v^- + v^+}{2}$ is the average from both sides.

    The terms in the equation represent the cell integral after integration by parts, the primal consistency term that arises at the element interfaces due to integration by parts and insertion of an average flux, the adjoint consistency term that is added for restoring symmetry of the underlying matrix, and a penalty term with factor $\sigma$, whose magnitude is equal to the inverse of the length of the cells in the direction normal to the face multiplied by $k(k+1)$, see step-39. The penalty term is chosen such that an inverse estimate holds and the final weak form is coercive, i.e., positive definite in the discrete setting. The adjoint consistency term and the penalty term involve the jump $\jump{u_h}$ at the element interfaces, which disappears for the analytic solution $u$. Thus, these terms are consistent with the original PDE, ensuring that the method can retain optimal orders of convergence.

    In the implementation below, we implement the weak form above by moving the normal vector $\mathbf{n}^-$ from the jump terms to the derivatives to form a normal derivative of the form $\mathbf{n}^-\cdot \nabla u_h$. This makes the implementation on quadrature points slightly more efficient because we only need to work with scalar terms rather than tensors, and is mathematically equivalent.

    For boundary conditions, we use the so-called mirror principle that defines artificial exterior values $u^+$ by extrapolation from the interior solution $u^-$ combined with the given boundary data, setting $u^+ = -u^- + 2 g_\text{D}$ and $\mathbf{n}^-\cdot \nabla u^+ = \mathbf{n}^-\cdot \nabla u^-$ on Dirichlet boundaries, and $u^+=u^-$ and $\mathbf{n}^-\cdot \nabla u^+ = -\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}$ on Neumann boundaries.

    The matrix-free framework of deal.II provides the necessary infrastructure to implement the action of the discretized equation above. As opposed to the MatrixFree::cell_loop() that we used in step-37 and step-48, we now build a code in terms of MatrixFree::loop() that takes three function pointers, one for the cell integrals, one for the inner face integrals, and one for the boundary face integrals (in analogy to the design of MeshWorker used in the step-39 tutorial program). In each of these three functions, we then implement the respective terms on the quadrature points. For interpolation between the vector entries and the values and gradients on quadrature points, we use the class FEEvaluation for cell contributions and FEFaceEvaluation for face contributions. The basic usage of these functions has been discussed extensively in the step-37 tutorial program.

    In MatrixFree::loop(), all interior faces are visited exactly once, so one must make sure to compute the contributions from both the test functions $v_h^-$ and $v_h^+$. Given the fact that the test functions on both sides are indeed independent, the weak form above effectively means that we submit the same contribution to both FEFaceEvaluation objects, called phi_inner and phi_outer, for testing with the normal derivative of the test function, and values with opposite sign for testing with the values of the test function, because the latter involves opposite signs due to the jump term. For faces between cells of different refinement level, the integration is done from the refined side, and FEFaceEvaluation automatically performs interpolation to a subface on the coarse side. Thus, a hanging node never appears explicitly in a user implementation of a weak form.

    The fact that each face is visited exactly once also applies to those faces at subdomain boundaries between different processors when parallelized with MPI, where one cell belongs to one processor and one to the other. The setup in MatrixFree::reinit() splits the faces between the two sides, and eventually only reports the faces actually handled locally in MatrixFree::n_inner_face_batches() and MatrixFree::n_boundary_face_batches(), respectively. Note that, in analogy to the cell integrals discussed in step-37, deal.II applies vectorization over several faces to use SIMD, working on something we call a batch of faces with a single instruction. The face batches are independent from the cell batches, even though the time at which face integrals are processed is kept close to the time when the cell integrals of the respective cells are processed, in order to increase the data locality.

    Another thing that is new in this program is the fact that we no longer split the vector access like FEEvaluation::read_dof_values() or FEEvaluation::distribute_local_to_global() from the evaluation and integration steps, but call combined functions FEEvaluation::gather_evaluate() and FEEvaluation::integrate_scatter(), respectively. This is useful for face integrals because, depending on what gets evaluated on the faces, not all vector entries of a cell must be touched in the first place. Think for example of the case of the nodal element FE_DGQ with node points on the element surface: If we are interested in the shape function values on a face, only $(k+ 1)^{d-1}$ degrees of freedom contribute to them in a non-trivial way (in a more technical way of speaking, only $(k+1)^{d-1}$ shape functions have a nonzero support on the face and return true for FiniteElement::has_support_on_face()). When compared to the $(k+1)^d$ degrees of freedom of a cell, this is one power less.

    Now of course we are not interested in only the function values, but also the derivatives on the cell. Fortunately, there is an element in deal.II that extends this property of reduced access also for derivatives on faces, the FE_DGQHermite element.

    The FE_DGQHermite element

    The element FE_DGQHermite belongs to the family of FE_DGQ elements, i.e., its shape functions are a tensor product of 1D polynomials and the element is fully discontinuous. As opposed to the nodal character in the usual FE_DGQ element, the FE_DGQHermite element is a mixture of nodal contributions and derivative contributions based on a Hermite-like concept. The underlying polynomial class is Polynomials::HermiteLikeInterpolation and can be summarized as follows: For cubic polynomials, we use two polynomials to represent the function value and first derivative at the left end of the unit interval, $x=0$, and two polynomials to represent the function value and first derivative at the right end of the unit interval, $x=1$. At the opposite ends, both the value and first derivative of the shape functions are zero, ensuring that only two out of the four basis functions contribute to values and derivative on the respective end. However, we deviate from the classical Hermite interpolation in not strictly assigning one degree of freedom for the value and one for the first derivative, but rather allow the first derivative to be a linear combination of the first and the second shape function. This is done to improve the conditioning of the interpolation. Also, when going to degrees beyond three, we add node points in the element interior in a Lagrange-like fashion, combined with double zeros in the points $x=0$ and $x=1$. The position of these extra nodes is determined by the zeros of some Jacobi polynomials as explained in the description of the class Polynomials::HermiteLikeInterpolation.


    This optimization is not only useful for computing the face integrals, but also for the MPI ghost layer exchange: In a naive exchange, we would need to send all degrees of freedom of a cell to another processor if the other processor is responsible for computing the face's contribution. Since we know that only some of the degrees of freedom in the evaluation with FEFaceEvaluation are touched, it is natural to only exchange the relevant ones. The MatrixFree::loop() function has support for a selected data exchange when combined with LinearAlgebra::distributed::Vector. To make this happen, we need to tell the loop what kind of evaluation on faces we are going to do, using an argument of type MatrixFree::DataAccessOnFaces, as can be seen in the implementation of LaplaceOperator::vmult() below. The way data is exchanged in that case is as follows: The ghost layer data in the vector still pretends to represent all degrees of freedom, such that FEFaceEvaluation can continue to read the values as if the cell were a locally owned one. The data exchange routines take care of the task for packing and unpacking the data into this format. While this sounds pretty complicated, we will show in the results section below that this really pays off by comparing the performance to a baseline code that does not specify the data access on faces.

    An approximate block-Jacobi smoother using the fast diagonalization method

    In the tradition of the step-37 program, we again solve a Poisson problem with a geometric multigrid preconditioner inside a conjugate gradient solver. Instead of computing the diagonal and use the basic PreconditionChebyshev as a smoother, we choose a different strategy in this tutorial program. We implement a block-Jacobi preconditioner, where a block refers to all degrees of freedom on a cell. Rather than building the full cell matrix and applying its LU factorization (or inverse) in the preconditioner — an operation that would be heavily memory bandwidth bound and thus pretty slow — we approximate the inverse of the block by a special technique called fast diagonalization method.

    The idea of the method is to make use of the structure of the cell matrix. In case of the Laplacian with constant coefficients discretized on a Cartesian mesh, the cell matrix $L$ can be written as

    \begin{align*}
 L &= A_1 \otimes M_0 + M_1 \otimes A_0
 \end{align*}

    in 2D and

    \begin{align*}
 L &= A_2 \otimes M_1 \otimes M_0 + M_2 \otimes A_1 \otimes M_0 + M_2 \otimes M_1 \otimes A_0
 \end{align*}

    in 3D. The matrices $A_0$ and $A_1$ denote the 1D Laplace matrix (including the cell and face term associated to the current cell values $u^-_h$ and $v^-_h$) and $M_0$ and $M_1$ are the mass matrices. Note that this simple tensor product structure is lost once there are non-constant coefficients on the cell or the geometry is not constant any more. We mention that a similar setup could also be used to replace the computed integrals with this final tensor product form of the matrices, which would cut the operations for the operator evaluation into less than half. However, given the fact that this only holds for Cartesian cells and constant coefficients, which is a pretty narrow case, we refrain from pursuing this idea.

    Interestingly, the exact inverse of the matrix $L$ can be found through tensor products due to a method introduced by Lynch et al. [Lynch1964] in 1964,

    \begin{align*}
 L^{-1} &= S_1 \otimes S_0 (\Lambda_1 \otimes I + I \otimes \Lambda_0)^{-1}
 S_1^\mathrm T \otimes S_0^\mathrm T,
 \end{align*}

    where $S_d$ is the matrix of eigenvectors to the generalized eigenvalue problem in the given tensor direction $d$:

    \begin{align*}
 A_d s  &= \lambda M_d s, \quad d = 0, \ldots,\mathrm{dim-1},
 \end{align*}

    and $\Lambda_d$ is the diagonal matrix representing the generalized eigenvalues $\lambda$. Note that the vectors $s$ are such that they simultaneously diagonalize $A_d$ and $M_d$, i.e. $S_d^{\mathrm T} A_d S_d = \Lambda_d$ and $S_d^{\mathrm T} M_d S_d = I$.

    The deal.II library implements a class using this concept, called TensorProductMatrixSymmetricSum.
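    A minimal sketch of using this class for a single cell in the scalar, non-vectorized case might look as follows (assumed usage, not copied from the tutorial; src is a given cell-local Vector<double>):

      // 1d building blocks M_d and A_d, one Table<2,double> per direction:
      std::array<Table<2, double>, dim> mass_1d;
      std::array<Table<2, double>, dim> laplace_1d;
      // ... fill the 1d mass and Laplace matrices for each direction ...
      TensorProductMatrixSymmetricSum<dim, double> cell_inverse;
      cell_inverse.reinit(mass_1d, laplace_1d); // generalized eigendecompositions
      Vector<double> dst(src.size());
      cell_inverse.apply_inverse(make_array_view(dst), make_array_view(src));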

    For the sake of this program, we stick with constant coefficients and Cartesian meshes, even though an approximate version based on tensor products would still be possible for a more general mesh, and the operator evaluation itself is of course generic. Also, we do not bother with adaptive meshes where the multigrid algorithm would need to get access to flux matrices over the edges of different refinement, as explained in step-39. One thing we do, however, is to still wrap our block-Jacobi preconditioner inside PreconditionChebyshev. That class relieves us from finding an appropriate relaxation parameter (which would be around 0.7 in 2D and 0.5 in 3D for the block-Jacobi smoother), and often increases smoothing efficiency somewhat over plain Jacobi smoothing, especially when using several iterations.

      const unsigned int dimension = 3;

    Equation data

    In analogy to step-7, we define an analytic solution that we try to reproduce with our discretization. Since the aim of this tutorial is to show matrix-free methods, we choose one of the simplest possibilities, namely a cosine function whose derivatives are simple enough for us to compute analytically. Further down, the wave number 2.4 we select here will be matched with the domain extent in $x$-direction that is 2.5, such that we obtain a periodic solution at $x = 2.5$ including $6\pi$ or three full wave revolutions in the cosine. The first function defines the solution and its gradient for expressing the analytic solution for the Dirichlet and Neumann boundary conditions, respectively. Furthermore, a class representing the negative Laplacian of the solution is used to represent the right hand side (forcing) function that we use to match the given analytic solution in the discretized version (manufactured solution).

      template <int dim>
      class Solution : public Function<dim>
      {
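      public:
        // (Sketched continuation: the tutorial overrides value() and
        // gradient() of Function<dim> for the manufactured cosine solution;
        // the bodies are omitted here.)
        virtual double value(const Point<dim> & p,
                             const unsigned int component = 0) const override;
        virtual Tensor<1, dim>
        gradient(const Point<dim> & p,
                 const unsigned int component = 0) const override;
      };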

    The second new feature is the fact that we do not implement a vmult_add() function as we did in step-37 (through the virtual function MatrixFreeOperators::Base::vmult_add()), but directly implement a vmult() functionality. Since both cell and face integrals will sum into the destination vector, we must of course zero the vector somewhere. For DG elements, we are given two options – one is to use FEEvaluation::set_dof_values() instead of FEEvaluation::distribute_local_to_global() in the apply_cell function below. This works because the loop layout in MatrixFree is such that cell integrals always touch a given vector entry before the face integrals. However, this really only works for fully discontinuous bases where every cell has its own degrees of freedom, without any sharing with neighboring cells. An alternative setup, the one chosen here, is to let the MatrixFree::loop() take care of zeroing the vector. This can be thought of as simply calling dst = 0; somewhere in the code. The implementation is more involved for supported vectors such as LinearAlgebra::distributed::Vector, because we aim to not zero the whole vector at once. Doing the zero operation in small enough pieces of a few thousand vector entries has the advantage that the vector entries that get zeroed remain in caches before they are accessed again in FEEvaluation::distribute_local_to_global() and FEFaceEvaluation::distribute_local_to_global(). Since matrix-free operator evaluation is really fast, just zeroing a large vector can amount to up to 25% of the operator evaluation time, and we obviously want to avoid this cost. This option of zeroing the vector is also available for MatrixFree::cell_loop and for continuous bases, even though it was not used in the step-37 or step-48 tutorial programs.

    The third new feature is the way we provide the functions to compute on cells, inner faces, and boundary faces: The class MatrixFree has a function called loop that takes three function pointers to the three cases, allowing to separate the implementations of different things. As explained in step-37, these function pointers can be std::function objects or member functions of a class. In this case, we use pointers to member functions.

    The final new feature are the last two arguments of type MatrixFree::DataAccessOnFaces that can be given to MatrixFree::loop(). This class passes the type of data access for face integrals to the MPI data exchange routines LinearAlgebra::distributed::Vector::update_ghost_values() and LinearAlgebra::distributed::Vector::compress() of the parallel vectors. The purpose is to not send all degrees of freedom of a neighboring element, but to reduce the amount of data to what is really needed for the computations at hand. The data exchange is a real bottleneck in particular for high-degree DG methods, therefore a more restrictive way of exchange is often beneficial. The enum field MatrixFree::DataAccessOnFaces can take the value none, which means that no face integrals at all are done, which would be analogous to MatrixFree::cell_loop(), the value values meaning that only shape function values (but no derivatives) are used on faces, and the value gradients when also first derivatives on faces are accessed besides the values. A value unspecified means that all degrees of freedom will be exchanged for the faces that are located at the processor boundaries and designated to be worked on at the local processor.

    To see how the data can be reduced, think of the case of the nodal element FE_DGQ with node points on the element surface, where only $(k+1)^{d-1}$ degrees of freedom contribute to the values on a face for polynomial degree $k$ in $d$ space dimensions, out of the $(k+1)^d$ degrees of freedom of a cell. A similar reduction is also possible for the interior penalty method that evaluates values and first derivatives on the faces. When using a Hermite-like basis in 1d, only up to two basis functions contribute to the value and derivative. The class FE_DGQHermite implements a tensor product of this concept, as discussed in the introduction. Thus, only $2(k+1)^{d-1}$ degrees of freedom must be exchanged for each face, which is a clear win once $k$ gets larger than four or five. Note that this reduced exchange of FE_DGQHermite is valid also on meshes with curved boundaries, as the derivatives are taken on the reference element, whereas the geometry only mixes them on the inside. Thus, this is different from the attempt to obtain $C^1$ continuity with continuous Hermite-type shape functions where the non-Cartesian case changes the picture significantly. Obviously, on non-Cartesian meshes the derivatives also include tangential derivatives of shape functions beyond the normal derivative, but those only need the function values on the element surface, too. Should the element not provide any compression, the loop automatically exchanges all entries for the affected cells.

      template <int dim, int fe_degree, typename number>
      void LaplaceOperator<dim, fe_degree, number>::vmult(
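        LinearAlgebra::distributed::Vector<number> &      dst,
        const LinearAlgebra::distributed::Vector<number> &src) const
      {
        // (Sketched continuation: argument list and body inferred from the
        // description above, not copied verbatim.) The loop receives the
        // three integral kernels, zeroes dst in chunks, and declares that
        // face integrals access values and gradients so that the MPI data
        // exchange can be reduced.
        data.loop(&LaplaceOperator::apply_cell,
                  &LaplaceOperator::apply_face,
                  &LaplaceOperator::apply_boundary,
                  this,
                  dst,
                  src,
                  /*zero_dst_vector=*/true,
                  MatrixFree<dim, number>::DataAccessOnFaces::gradients,
                  MatrixFree<dim, number>::DataAccessOnFaces::gradients);
      }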
      for (unsigned int face = face_range.first; face < face_range.second; ++face)
      {
    On a given batch of faces, we first update the pointers to the current face and then access the vector. As mentioned above, we combine the vector access with the evaluation. In the case of face integrals, the data access into the vector can be reduced for the special case of an FE_DGQHermite basis as explained for the data exchange above: Since only $2(k+1)^{d-1}$ out of the $(k+1)^d$ cell degrees of freedom get multiplied by a non-zero value or derivative of a shape function, this structure can be utilized for the evaluation, significantly reducing the data access. The reduction of the data access is not only beneficial because it reduces the data in flight and thus helps caching, but also because the data access to faces is often more irregular than for cell integrals when gathering values from cells that are farther apart in the index list of cells.

      phi_inner.reinit(face);
      phi_inner.gather_evaluate(src,
                                EvaluationFlags::values |
                                  EvaluationFlags::gradients);

    The boundary face function follows by and large the interior face function. The only difference is the fact that we do not have a separate FEFaceEvaluation object that provides us with exterior values $u^+$, but we must define them from the boundary conditions and interior values $u^-$. As explained in the introduction, we use $u^+ = -u^- + 2 g_\text{D}$ and $\mathbf{n}^-\cdot \nabla u^+ = \mathbf{n}^-\cdot \nabla u^-$ on Dirichlet boundaries and $u^+=u^-$ and $\mathbf{n}^-\cdot \nabla u^+ = -\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}$ on Neumann boundaries. Since this operation implements the homogeneous part, i.e., the matrix-vector product, we must neglect the boundary functions $g_\text{D}$ and $g_\text{N}$ here; they are instead added to the right hand side in LaplaceProblem::compute_rhs(). Note that due to extension of the solution $u^-$ to the exterior via $u^+$, we can keep all factors $0.5$ the same as in the inner face function, see also the discussion in step-39.

    There is one catch at this point: The implementation below uses a boolean variable is_dirichlet to switch between the Dirichlet and the Neumann cases. However, we solve a problem where we also want to impose periodic boundary conditions on some boundaries, namely along those in the $x$ direction. One might wonder how those conditions should be handled here. The answer is that MatrixFree automatically treats periodic boundaries as what they are technically, namely an inner face where the solution values of two adjacent cells meet and must be treated by proper numerical fluxes. Thus, all the faces on the periodic boundaries will appear in the apply_face() function and not in this one.
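    At each quadrature point of a boundary face, the mirror principle for the homogeneous operator (dropping $g_\text{D}$ and $g_\text{N}$) can be sketched like this; the variable names are assumptions:

      const auto u_inner                 = phi_inner.get_value(q);
      const auto normal_derivative_inner = phi_inner.get_normal_derivative(q);
      // Dirichlet: u^+ = -u^-, normal derivative unchanged;
      // Neumann:   u^+ =  u^-, normal derivative flips sign.
      const auto u_outer                 = is_dirichlet ? -u_inner : u_inner;
      const auto normal_derivative_outer =
        is_dirichlet ? normal_derivative_inner : -normal_derivative_inner;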

      template <int dim, int fe_degree, typename number>
      void LaplaceOperator<dim, fe_degree, number>::apply_boundary(
      const MatrixFree<dim, number> & data,
      }
     

    Next, we go through the cells and pass the scaled matrices to TensorProductMatrixSymmetricSum to actually compute the generalized eigenvalue problem for representing the inverse: Since the matrix approximation is constructed as $A\otimes M + M\otimes A$ and the weights are constant for each element, we can apply all weights on the Laplace matrix and simply keep the mass matrices unscaled. In the loop over cells, we want to make use of the geometry compression provided by the MatrixFree class and check if the current geometry is the same as on the last cell batch, in which case there is nothing to do. This compression can be accessed by FEEvaluation::get_mapping_data_index_offset() once reinit() has been called.

    Once we have accessed the inverse Jacobian through the FEEvaluation access function (we take the one for the zeroth quadrature point as they should be the same on all quadrature points for a Cartesian cell), we check that it is diagonal and then extract the determinant of the original Jacobian, i.e., the inverse of the determinant of the inverse Jacobian, and set the weight as $\text{det}(J) / h_d^2$ according to the 1d Laplacian times $d-1$ copies of the mass matrix.

      cell_matrices.clear();
      unsigned int old_mapping_data_index = numbers::invalid_unsigned_int;

    The run() function sets up the initial grid and then runs the multigrid program in the usual way. As a domain, we choose a rectangle with periodic boundary conditions in the $x$-direction, a Dirichlet condition on the front face in $y$ direction (i.e., the face with index number 2, with boundary id equal to 0), and Neumann conditions on the back face as well as the two faces in $z$ direction for the 3d case (with boundary id equal to 1). The extent of the domain is a bit different in the $x$ direction (where we want to achieve a periodic solution given the definition of Solution) as compared to the $y$ and $z$ directions.

      template <int dim, int fe_degree>
      void LaplaceProblem<dim, fe_degree>::run()
      {
    degree    1     2     3     4     5     6     7     8     9     10    11    12
    MDoFs/s   2.94  3.29  3.62  3.72  3.47  3.41  2.93  2.88  2.57  2.27  2.01  1.87

    We clearly see how the efficiency per DoF initially improves until it reaches a maximum for the polynomial degree $k=4$. This effect is surprising, not only because higher polynomial degrees often yield a vastly better solution, but especially also when having matrix-based schemes in mind where the denser coupling at higher degree leads to a monotonically decreasing throughput (and a drastic one in 3D, with $k=4$ being more than ten times slower than $k=1$!). For higher degrees, the throughput decreases a bit, which is both due to an increase in the number of iterations (going from 12 at $k=2,3,4$ to 19 at $k=10$) and due to the $\mathcal O(k)$ complexity of operator evaluation. Nonetheless, efficiency in terms of time to solution would still be better for higher polynomial degrees because they have better convergence rates (at least for problems as simple as this one): For $k=12$, we reach roundoff accuracy already with 1 million DoFs (solver time less than a second), whereas for $k=8$ we need 24 million DoFs and 8 seconds. For $k=5$, the error is around $10^{-9}$ with 57 million DoFs and thus still far away from roundoff, despite taking 16 seconds.

    Note that the above numbers are a bit pessimistic because they include the time it takes the Chebyshev smoother to compute an eigenvalue estimate, which is around 10 percent of the solver time. If the system is solved several times (as e.g. common in fluid dynamics), this eigenvalue cost is only paid once and faster times become available.

    Evaluation of efficiency of ingredients

    Finally, we take a look at some of the special ingredients presented in this tutorial program, namely the FE_DGQHermite basis in particular and the specification of MatrixFree::DataAccessOnFaces. In the following table, the third row shows the optimized solver above, the fourth row shows the timings with MatrixFree::DataAccessOnFaces set to unspecified rather than the optimal gradients, and the last one shows the result of replacing FE_DGQHermite by the basic FE_DGQ elements, where both the MPI exchange and the operations done by FEFaceEvaluation::gather_evaluate() and FEFaceEvaluation::integrate_scatter() are more expensive.

    @@ -1351,7 +1351,7 @@

    Solver time FE_DGQ [s]   0.712  2.041  5.066  9.335  2.379  3.802  6.564  9.714  14.54  22.76  4.148  5.857

    The data in the table shows that not using MatrixFree::DataAccessOnFaces increases costs by around 10% for higher polynomial degrees. For lower degrees, the difference is obviously less pronounced because the volume-to-surface ratio is more beneficial and less data needs to be exchanged. The difference is larger when looking at the matrix-vector product only, rather than the full multigrid solver shown here, with around 20% worse timings just because of the MPI communication.
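    In code, the difference between the two variants is a single pair of arguments to the matrix-free loop. A minimal sketch from inside the operator's vmult(), where the data object and the cell/face/boundary worker functions stand in for those of the tutorial's Laplace operator:

      // Tight variant: only values and first derivatives of the Hermite-like
      // basis are needed from neighboring cells, so the ghost exchange can be
      // restricted to that subset of vector entries.
      data.loop(&LaplaceOperator::apply_cell,
                &LaplaceOperator::apply_face,
                &LaplaceOperator::apply_boundary,
                this, dst, src, /*zero_dst_vector=*/true,
                MatrixFree<dim, number>::DataAccessOnFaces::gradients,
                MatrixFree<dim, number>::DataAccessOnFaces::gradients);

      // Fallback corresponding to the fourth row of the table: exchange all
      // ghost entries, regardless of what the face kernels actually touch.
      data.loop(&LaplaceOperator::apply_cell,
                &LaplaceOperator::apply_face,
                &LaplaceOperator::apply_boundary,
                this, dst, src, /*zero_dst_vector=*/true,
                MatrixFree<dim, number>::DataAccessOnFaces::unspecified,
                MatrixFree<dim, number>::DataAccessOnFaces::unspecified);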

    For $k=1$ and $k=2$, the Hermite-like basis functions obviously do not really pay off (indeed, for $k=1$ the polynomials are exactly the same as for FE_DGQ) and the results are similar to those with the FE_DGQ basis. However, for degrees starting at three, we see an increasing advantage for FE_DGQHermite, showing the effectiveness of these basis functions.

    Possibilities for extension

    As mentioned in the introduction, the fast diagonalization method as realized here is tied to a Cartesian mesh with constant coefficients. When dealing with meshes that contain deformed cells or with variable coefficients, it is common to determine a nearby Cartesian mesh cell as an approximation. This can be done with the class TensorProductMatrixSymmetricSumCollection. Here, one can insert cell matrices similarly to the PreconditionBlockJacobi::initialize() function of this tutorial program. The benefit of the collection class is that cells on which the coefficient of the PDE has the same value can re-use the same Laplacian matrix, which reduces the memory consumption for the inverse matrices. As compared to the algorithm implemented in this tutorial program, one would define the length scales as the distances between opposing faces. For continuous elements, the code project <a href="https://github.com/peterrum/dealii-dd-and-schwarz">Cache-optimized and low-overhead implementations of multigrid smoothers for high-order FEM</a>
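    A minimal sketch of how such a collection might be filled and used, assuming its reserve()/insert()/finalize()/apply_inverse() interface; the per-cell 1d mass and stiffness matrices Ms and Ks, as well as the local vectors, are placeholders:

      TensorProductMatrixSymmetricSumCollection<dim, double> collection;
      collection.reserve(n_cells);
      for (unsigned int cell = 0; cell < n_cells; ++cell)
        // Entries with identical 1d matrices are compressed internally, so
        // cells on which the PDE coefficient agrees share one inverse.
        collection.insert(cell, Ms[cell], Ks[cell]);
      collection.finalize();

      // Apply the fast-diagonalization inverse on one cell's local vector:
      collection.apply_inverse(cell_index,
                               make_array_view(dst_local),
                               make_array_view(src_local));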

/usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html differs (HTML document, UTF-8 Unicode text, with very long lines)

@@ -170,14 +170,14 @@

    \begin{align*}
      \|\nabla(u-u_h)\|_{\Omega} \le C h_\text{max}^p \| \nabla^{p+1} u \|_{\Omega},
    \end{align*}

    where $C$ is some constant independent of $h$ and $u$, $p$ is the polynomial degree of the finite element in use, and $h_\text{max}$ is the diameter of the largest cell. So if the largest cell is important, then why would we want to make the mesh fine in some parts of the domain but not all?

    The answer lies in the observation that the formula above is not optimal. In fact, some more work shows that the following is a better estimate (which you should compare to the square of the estimate above):

    \begin{align*}
   \|\nabla(u-u_h)\|_{\Omega}^2 \le C \sum_K h_K^{2p} \| \nabla^{p+1} u \|^2_K.
 \end{align*}

    (Because $h_K\le h_\text{max}$, this formula immediately implies the previous one if you just pull the mesh size out of the sum.) What this formula suggests is that it is not necessary to make the largest cell small, but that the cells really only need to be small where $\| \nabla^{p+1} u \|_K$ is large! In other words: The mesh really only has to be fine where the solution has large variations, as indicated by the $p+1$st derivative. This makes intuitive sense: if, for example, we use a linear element $p=1$, then places where the solution is nearly linear (as indicated by $\nabla^2 u$ being small) will be well resolved even if the mesh is coarse. Only those places where the second derivative is large will be poorly resolved by large elements, and consequently that's where we should make the mesh small.

    Of course, this a priori estimate is not very useful in practice since we don't know the exact solution $u$ of the problem, and consequently, we cannot compute $\nabla^{p+1}u$. But, and that is the approach commonly taken, we can compute numerical approximations of $\nabla^{p+1}u$ based only on the discrete solution $u_h$ that we have computed before. We will discuss this in slightly more detail below. This will then help us determine which cells have a large $p+1$st derivative, and these are then candidates for refining the mesh.

    How to deal with hanging nodes in theory

    The methods using triangular meshes mentioned above go to great lengths to make sure that each vertex is a vertex of all adjacent cells – i.e., that there are no hanging nodes. This then automatically makes sure that we can define shape functions in such a way that they are globally continuous (if we use the common $Q_p$ Lagrange finite element methods we have been using so far in the tutorial programs, as represented by the FE_Q class).

    On the other hand, if we define shape functions on meshes with hanging nodes, we may end up with shape functions that are not continuous. To see this, think about the situation above where the top right cell is not refined, and consider for a moment the use of a bilinear finite element. In that case, the shape functions associated with the hanging nodes are defined in the obvious way on the two small cells adjacent to each of the hanging nodes. But how do we extend them to the big adjacent cells? Clearly, the function's extension to the big cell cannot be bilinear because then it needs to be linear along each edge of the large cell, and that means that it needs to be zero on the entire edge because it needs to be zero on the two vertices of the large cell on that edge. But it is not zero at the hanging node itself when seen from the small cells' side – so it is not continuous. The following three figures show three of the shape functions along the edges in question that turn out to not be continuous when defined in the usual way simply based on the cells they are adjacent to:

    @@ -193,7 +193,7 @@
    A discontinuous shape function adjacent to a hanging node
    But we do want the finite element solution to be continuous so that we have a “conforming finite element method” where the discrete finite element space is a proper subset of the $H^1$ function space in which we seek the solution of the Laplace equation. To guarantee that the global solution is continuous at these nodes as well, we have to state some additional constraints on the values of the solution at these nodes. The trick is to realize that while the shape functions shown above are discontinuous (and consequently an arbitrary linear combination of them is also discontinuous), linear combinations in which the shape functions are added up as $u_h(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)$ can be continuous if the coefficients $U_j$ satisfy certain relationships. In other words, the coefficients $U_j$ can not be chosen arbitrarily but have to satisfy certain constraints so that the function $u_h$ is in fact continuous. What these constraints have to look like is relatively easy to understand conceptually, but the implementation in software is complicated and takes several thousand lines of code. On the other hand, in user code, it is only about half a dozen lines you have to add when dealing with hanging nodes.

    In the program below, we will show how we can get these constraints from deal.II, and how to use them in the solution of the linear system of equations. Before going over the details of the program below, you may want to take a look at the Constraints on degrees of freedom documentation module that explains how these constraints can be computed and what classes in deal.II work on them.

    How to deal with hanging nodes in practice

    The practice of hanging node constraints is rather simpler than the theory we have outlined above. In reality, you will only have to add about half a dozen lines of additional code to a program like step-4 to make it work with adaptive meshes that have hanging nodes. The interesting part about this is that it is entirely independent of the equation you are solving: The algebraic nature of these constraints has nothing to do with the equation and only depends on the choice of finite element. As a consequence, the code to deal with these constraints is entirely contained in the deal.II library itself, and you do not need to worry about the details.

    @@ -206,7 +206,7 @@

    These four steps are really all that is necessary – it's that simple from a user perspective. The fact that, in the function calls mentioned above, you will run through several thousand lines of not-so-trivial code is entirely immaterial to this: In user code, there are really only four additional steps.
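    The list of steps itself is elided in this excerpt, but in user code they typically reduce to the following calls. A minimal sketch using AffineConstraints, with placeholder variable names:

      // Compute the constraints induced by hanging nodes.
      AffineConstraints<double> constraints;
      DoFTools::make_hanging_node_constraints(dof_handler, constraints);
      constraints.close();

      // During assembly, copy local contributions into the global objects
      // while eliminating constrained degrees of freedom on the fly.
      constraints.distribute_local_to_global(
        cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);

      // After solving, set constrained DoFs to their correct values.
      constraints.distribute(solution);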

    How we obtain locally refined meshes

    The next question, now that we know how to deal with meshes that have these hanging nodes, is how we obtain them.

    A simple way has already been shown in step-1: If you know where it is necessary to refine the mesh, then you can create one by hand. But in reality, we don't know this: We don't know the solution of the PDE up front (because, if we did, we wouldn't have to use the finite element method), and consequently we do not know where it is necessary to add local mesh refinement to better resolve areas where the solution has strong variations. But the discussion above shows that maybe we can get away with using the discrete solution $u_h$ on one mesh to estimate the derivatives $\nabla^{p+1} u$, and then use this to determine which cells are too large and which already small enough. We can then generate a new mesh from the current one using local mesh refinement. If necessary, this step is then repeated until we are happy with our numerical solution – or, more commonly, until we run out of computational resources or patience.

    So that's exactly what we will do. The locally refined grids are produced using an error estimator which estimates the energy error for numerical solutions of the Laplace operator. Since it was developed by Kelly and co-workers, we often refer to it as the “Kelly refinement indicator” in the library, documentation, and mailing list. The class that implements it is called KellyErrorEstimator, and there is a great deal of information to be found in the documentation of that class that need not be repeated here. The summary, however, is that the class computes a vector with as many entries as there are active cells, and where each entry contains an estimate of the error on that cell. This estimate is then used to refine the cells of the mesh: those cells that have a large error will be marked for refinement, those that have a particularly small estimate will be marked for coarsening. We don't have to do this by hand: The functions in namespace GridRefinement will do all of this for us once we have obtained the vector of error estimates.
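    The whole estimate-mark-refine cycle then takes only a few lines. A minimal sketch (the quadrature degree and the refine/coarsen fractions are placeholder choices):

      Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
      KellyErrorEstimator<dim>::estimate(dof_handler,
                                         QGauss<dim - 1>(fe.degree + 1),
                                         {}, // no Neumann boundary data
                                         solution,
                                         estimated_error_per_cell);
      GridRefinement::refine_and_coarsen_fixed_number(triangulation,
                                                      estimated_error_per_cell,
                                                      0.3,   // refine top 30%
                                                      0.03); // coarsen 3%
      triangulation.execute_coarsening_and_refinement();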

    It is worth noting that while the Kelly error estimator was developed for Laplace's equation, it has proven to be a suitable tool to generate locally refined meshes for a wide range of equations, not restricted to elliptic problems only. Although it will create non-optimal meshes for other equations, it is often a good way to quickly produce meshes that are well adapted to the features of solutions, such as regions of great variation or discontinuities.

    Boundary conditions

    @@ -667,8 +667,8 @@

    As we can see, all preconditioners behave pretty much the same on this simple problem, with the number of iterations growing like ${\cal O}(N^{1/2})$; and because each iteration requires around ${\cal O}(N)$ operations, the total CPU time grows like ${\cal O}(N^{3/2})$ (for the few smallest meshes, the CPU time is so small that it doesn't record). Note that even though it is the simplest method, Jacobi is the fastest for this problem.

    The situation changes slightly when the finite element is not a bi-quadratic one (i.e., polynomial degree two) as selected in the constructor of this program, but a bi-linear one (polynomial degree one). If one makes this change, the results are as follows:

    @@ -676,7 +676,7 @@

    In other words, while the increase in iterations and CPU time is as before, Jacobi is now the method that requires the most iterations; it is still the fastest one, however, owing to the simplicity of the operations it has to perform. This is not to say that Jacobi is actually a good preconditioner – for problems of appreciable size, it is definitely not, and other methods will be substantially better – but really only that it is fast because its implementation is so simple that it can compensate for a larger number of iterations.

    The message to take away from this is not that simplicity in preconditioners is always best. While this may be true for the current problem, it definitely is not once we move to more complicated problems (elasticity or Stokes, see for example step-8 or step-22). Secondly, all of these preconditioners still lead to an increase in the number of iterations as the number $N$ of degrees of freedom grows, for example ${\cal O}(N^\alpha)$; this, in turn, leads to a total growth in effort as ${\cal O}(N^{1+\alpha})$ since each iteration takes ${\cal O}(N)$ work. This behavior is undesirable: we would really like to solve linear systems with $N$ unknowns in a total of ${\cal O}(N)$ work; there is a class of preconditioners that can achieve this, namely geometric (step-16, step-37, step-39) or algebraic multigrid (step-31, step-40, and several others) preconditioners. They are, however, significantly more complex than the preconditioners outlined above, and so we will leave their use to these later tutorial programs. The point to make, however, is that "real" finite element programs do not use the preconditioners we mention above: These are simply shown for expository purposes.

    Finally, the last message to take home is that when the data shown above was generated (in 2018), linear systems with 100,000 unknowns are easily solved on a desktop or laptop machine in about a second, making the solution of relatively simple 2d problems even to very high accuracy not that big a task as it used to be in the past. At the same time, the situation for 3d problems continues to be quite different: A uniform 2d mesh with 100,000 unknowns corresponds to a grid with about $300 \times 300$ nodes; the corresponding 3d mesh has $300 \times 300 \times 300$ nodes and 30 million unknowns. Because finite element matrices in 3d have many more nonzero entries than in 2d, solving these linear systems will not only take 300 times as much CPU time, but substantially longer. In other words, achieving the same resolution in 3d is quite a large problem, and solving it within a reasonable amount of time will require much more work to implement better linear solvers. As mentioned above, multigrid methods and matrix-free methods (see, for example, step-37), along with parallelization (step-40) will be necessary, but are then also able to comfortably solve such linear systems.

    A better mesh

    If you look at the meshes above, you will see that even though the domain is the unit disk, and the jump in the coefficient lies along a circle, the cells that make up the mesh do not track this geometry well. The reason, already hinted at in step-1, is that in the absence of other information, the Triangulation class only sees a bunch of coarse grid cells but has, of course, no real idea what kind of geometry they might represent when looked at together. For this reason, we need to tell the Triangulation what to do when a cell is refined: where should the new vertices at the edge midpoints and the cell midpoint be located so that the child cells better represent the desired geometry than the parent cell.
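    In deal.II, this information is provided by attaching a Manifold object to the triangulation. A minimal sketch for a disk-shaped domain (the manifold id 0 is a conventional but assumed choice here):

      GridGenerator::hyper_ball(triangulation);
      // New vertices created during refinement of objects with manifold id 0
      // are placed on circles/spheres instead of straight-edge midpoints.
      triangulation.set_all_manifold_ids_on_boundary(0);
      triangulation.set_manifold(0, SphericalManifold<dim>());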

    @@ -794,15 +794,15 @@

    \[
      -\Delta u = f
    \]

    on smoothly bounded, convex domains are known to be smooth themselves. The exact degree of smoothness, i.e., the function space in which the solution lives, depends on how smooth exactly the boundary of the domain is, and how smooth the right hand side is. Some regularity of the solution may be lost at the boundary, but one generally has that the solution is twice more differentiable in compact subsets of the domain than the right hand side. If, in particular, the right hand side satisfies $f\in C^\infty(\Omega)$, then $u \in C^\infty(\Omega_i)$ where $\Omega_i$ is any compact subset of $\Omega$ ( $\Omega$ is an open domain, so a compact subset needs to keep a positive distance from $\partial\Omega$).

    The situation we chose for the current example is different, however: we look at an equation with a non-constant coefficient $a(\mathbf x)$:

    \[
   -\nabla \cdot (a \nabla u) = f.
 \]

    Here, if $a$ is not smooth, then the solution will not be smooth either, regardless of $f$. In particular, we expect that wherever $a$ is discontinuous along a line (or along a plane in 3d), the solution will have a kink. This is easy to see: if for example $f$ is continuous, then $f=-\nabla \cdot (a \nabla u)$ needs to be continuous. This means that $a \nabla u$ must be continuously differentiable (not have a kink). Consequently, if $a$ has a discontinuity, then $\nabla u$ must have an opposite discontinuity so that the two exactly cancel and their product yields a function without a discontinuity. But for $\nabla u$ to have a discontinuity, $u$ must have a kink. This is of course exactly what is happening in the current example, and easy to observe in the pictures of the solution.

    In general, if the coefficient $a(\mathbf x)$ is discontinuous along a line in 2d, or a plane in 3d, then the solution may have a kink, but the gradient of the solution will not go to infinity. That means that the solution is at least still in the Sobolev space $W^{1,\infty}$ (i.e., roughly speaking, in the space of functions whose derivatives are bounded). On the other hand, we know that in the most extreme cases – i.e., where the domain has reentrant corners, the right hand side only satisfies $f\in H^{-1}$, or the coefficient $a$ is only in $L^\infty$ – all we can expect is that $u\in H^1$ (i.e., the Sobolev space of functions whose derivative is square integrable), a much larger space than $W^{1,\infty}$. It is not very difficult to create cases where the solution is in a space $H^{1+s}$ where we can get $s$ to become as small as we want. Such cases are often used to test adaptive finite element methods because the mesh will have to resolve the singularity that causes the solution to not be in $W^{1,\infty}$ any more.

    The typical example one uses for this is called the Kellogg problem (referring to [Kel74]), which in the commonly used form has a coefficient $a(\mathbf x)$ that has different values in the four quadrants of the plane (or eight different values in the octants of ${\mathbb R}^3$). The exact degree of regularity (the $s$ in the index of the Sobolev space above) depends on the values of $a(\mathbf x)$ coming together at the origin, and by choosing the jumps large enough, the regularity of the solution can be made as close as desired to $H^1$.

    To implement something like this, one could replace the coefficient function by the following (shown here only for the 2d case):

    template <int dim>
    double coefficient(const Point<dim> &p)
    {
      // checkerboard coefficient in the quadrants; the values are assumptions
      return (p[0] * p[1] > 0) ? 100. : 1.;
    }
/usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html differs (HTML document, UTF-8 Unicode text, with very long lines)

@@ -132,24 +132,24 @@
    Note
    If you use this program as a basis for your own work, please consider citing it in your list of references. The initial version of this work was contributed to the deal.II project by the authors listed in the following citation: 10.5281/zenodo.1243280

    Introduction

    Non-matching grid constraints through distributed Lagrange multipliers

    In this tutorial we consider the case of two domains, $\Omega$ in $R^{\text{spacedim}}$ and $\Gamma$ in $R^{\text{dim}}$, where $\Gamma$ is embedded in $\Omega$ ( $\Gamma \subseteq \Omega$). We want to solve a partial differential equation on $\Omega$, enforcing some conditions on the solution of the problem on the embedded domain $\Gamma$.

    There are two interesting scenarios:

    In both cases, we define the restriction operator $\gamma$ as the operator that, given a continuous function on $\Omega$, returns its (continuous) restriction on $\Gamma$, i.e.,

    \[
 \gamma : C^0(\Omega) \mapsto C^0(\Gamma), \quad \text{ s.t. } \gamma u = u|_{\Gamma} \in C^0(\Gamma),
 \quad \forall u \in C^0(\Omega).
 \]

    It is well known that the operator $\gamma$ can be extended to a continuous operator on $H^1(\Omega)$, mapping functions in $H^1(\Omega)$ to functions in $H^1(\Gamma)$ when the intrinsic dimension of $\Gamma$ is the same as that of $\Omega$.

    The same is true, with a less regular range space (namely $H^{1/2}(\Gamma)$), when the dimension of $\Gamma$ is one less than that of $\Omega$, and $\Gamma$ does not have a boundary. In this second case, the operator $\gamma$ is also known as the trace operator, and it is well defined for Lipschitz co-dimension one curves and surfaces $\Gamma$ embedded in $\Omega$ (see the Wikipedia article on the trace operator for further details).

    The co-dimension two case is a little more complicated, and in general it is not possible to construct a continuous trace operator, not even from $H^1(\Omega)$ to $L^2(\Gamma)$, when the dimension of $\Gamma$ is zero or one respectively in two and three dimensions.

    In this tutorial program we're not interested in further details on $\gamma$: we take the extension $\gamma$ for granted, assuming that the dimension of the embedded domain (dim) is always smaller by one than, or equal to, the dimension of the embedding domain $\Omega$ (spacedim).

    We are going to solve the following differential problem: given a sufficiently regular function $g$ on $\Gamma$, a forcing term $f \in L^2(\Omega)$ and a Dirichlet boundary condition $u_D$ on $\partial \Omega$, find the solution $u$ to

    \begin{eqnarray*}
     - \Delta u + \gamma^T \lambda &=& f \text{ in } \Omega\\
     \gamma u &=& g \text{ in } \Gamma\\
     u &=& u_D \text{ on } \partial\Omega.
    \end{eqnarray*}

    This is a constrained problem, where we are looking for a function $u$ that solves the Poisson equation and that satisfies Dirichlet boundary conditions $u=u_D$ on $\partial \Omega$, subject to the constraint $\gamma u = g$ using a Lagrange multiplier.

    When $f=0$ this problem has a physical interpretation: harmonic functions, i.e., functions that satisfy the Laplace equation, can be thought of as the displacements of a membrane whose boundary values are prescribed. The current situation then corresponds to finding the shape of a membrane for which not only the displacement at the boundary, but also on $\Gamma$ is prescribed. For example, if $\Gamma$ is a closed curve in 2d space, then that would model a soap film that is held in place by a wire loop along $\partial \Omega$ as well as a second loop along $\Gamma$. In cases where $\Gamma$ is a whole area, you can think of this as a membrane that is stretched over an obstacle where $\Gamma$ is the contact area. (If the contact area is not known we have a different problem – called the "obstacle problem" – which is modeled in step-41.)

    As a first example we study the zero Dirichlet boundary condition on $\partial\Omega$. The same equations apply if we apply zero Neumann boundary conditions on $\partial\Omega$ or a mix of the two.

    The variational formulation can be derived by introducing two infinite dimensional spaces $V(\Omega)$ and $Q^*(\Gamma)$, respectively for the solution $u$ and for the Lagrange multiplier $\lambda$.

    Multiplying the first equation by $v \in V(\Omega)$ and the second by $q \in Q(\Gamma)$, integrating by parts when possible, and exploiting the boundary conditions on $\partial\Omega$, we obtain the following variational problem:

    Given a sufficiently regular function $g$ on $\Gamma$, find the solution $u$ to

    \begin{eqnarray*}
 (\nabla u, \nabla v)_{\Omega} + (\lambda, \gamma v)_{\Gamma} &=& (f,v)_{\Omega} \qquad \forall v \in V(\Omega) \\
 (\gamma u, q)_{\Gamma} &=& (g,q)_{\Gamma} \qquad \forall q \in Q(\Gamma),
 \end{eqnarray*}

    where $(\cdot, \cdot)_{\Omega}$ and $(\cdot, \cdot)_{\Gamma}$ represent, respectively, $L^2$ scalar products in $\Omega$ and in $\Gamma$.

    Inspection of the variational formulation tells us that the space $V(\Omega)$ can be taken to be $H^1_0(\Omega)$. The space $Q(\Gamma)$, in the co-dimension zero case, should be taken as $H^1(\Gamma)$, while in the co-dimension one case should be taken as $H^{1/2}(\Gamma)$.

    The function $g$ should therefore be either in $H^1(\Gamma)$ (for the co-dimension zero case) or $H^{1/2}(\Gamma)$ (for the co-dimension one case). This leaves us with a Lagrange multiplier $\lambda$ in $Q^*(\Gamma)$, which is either $H^{-1}(\Gamma)$ or $H^{-1/2}(\Gamma)$.

    There are two options for the discretization of the problem above. One could choose matching discretizations, where the Triangulation for $\Gamma$ is aligned with the Triangulation for $\Omega$, or one could choose to discretize the two domains in a completely independent way.

    The first option is clearly better suited for the simple problem we proposed above: it is sufficient to use a single Triangulation for $\Omega$ and then impose certain constraints depending on $\Gamma$. An example of this approach is studied in step-41, where the solution has to stay above an obstacle and this is achieved by imposing constraints on $\Omega$.

    To solve more complex problems, for example one where the domain $\Gamma$ is time dependent, the second option could be a more viable solution. Handling non-aligned meshes is complex by itself: to illustrate how it is done, we study a simple problem.

    The technique we describe here is presented in the literature using one of many names: the immersed finite element method, the fictitious boundary method, the distributed Lagrange multiplier method, and others. The main principle is that the discretization of the two grids and of the two finite element spaces are kept completely independent. This technique is particularly efficient for the simulation of fluid-structure interaction problems, where the configuration of the embedded structure is part of the problem itself, and one solves a (possibly non-linear) elastic problem to determine the (time dependent) configuration of $\Gamma$, and a (possibly non-linear) flow problem in $\Omega \setminus \Gamma$, plus coupling conditions on the interface between the fluid and the solid.

    In this tutorial program we keep things a little simpler, and we assume that the configuration of the embedded domain is given in one of two possible ways:

    We define the embedded reference domain $\Gamma_0$ embedded_grid: on this triangulation we construct a finite dimensional space (embedded_configuration_dh) to describe either the deformation or the displacement through a FiniteElement system of FE_Q objects (embedded_configuration_fe). This finite dimensional space is used only to interpolate a user supplied function (embedded_configuration_function) representing either $\psi$ (if the parameter use_displacement is set to false) or $\delta\psi$ (if the parameter use_displacement is set to true).

    The Lagrange multiplier $\lambda$ and the user supplied function $g$ are defined through another finite dimensional space embedded_dh, and through another FiniteElement embedded_fe, using the same reference domain. In order to take into account the deformation of the domain, either a MappingFEField or a MappingQEulerian object is initialized with the embedded_configuration vector.

    In the embedding space, a standard finite dimensional space space_dh is constructed on the embedding grid space_grid, using the FiniteElement space_fe, following almost verbatim the approach taken in step-6.

    We represent the discretizations of the spaces $V$ and $Q$ with

    \[
     V_h(\Omega) = \text{span} \{v_i\}_{i=1}^n
    \]

@@ -195,7 +195,7 @@

    \[
     Q_h(\Gamma) = \text{span} \{q_i\}_{i=1}^m
    \]

    respectively, where $n$ is the dimension of space_dh, and $m$ the dimension of embedded_dh.

    Once all the finite dimensional spaces are defined, the variational formulation of the problem above leaves us with the following finite dimensional system of equations:

    \[
    \begin{pmatrix} K & C^T \\ C & 0 \end{pmatrix}
    \begin{pmatrix} u \\ \lambda \end{pmatrix}
    =
    \begin{pmatrix} F \\ G \end{pmatrix},
    \]

    where

    \begin{eqnarray*}
     K_{ij} &\dealcoloneq& (\nabla v_j, \nabla v_i)_\Omega \qquad i,j = 1,\dots,n \\
     C_{\alpha j} &\dealcoloneq& (v_j, q_\alpha)_\Gamma \qquad j = 1,\dots,n, \ \alpha = 1,\dots,m \\
     F_{i} &\dealcoloneq& (f, v_i)_\Omega \qquad i = 1,\dots,n \\
     G_{\alpha} &\dealcoloneq& (g, q_\alpha)_\Gamma \qquad \alpha = 1,\dots, m.
    \end{eqnarray*}

    While the matrix $K$ is the standard stiffness matrix for the Poisson problem on $\Omega$, and the vector $G$ is a standard right-hand-side vector for a finite element problem with forcing term $g$ on $\Gamma$ (see, for example, step-3), the matrix $C$ or its transpose $C^T$ are non-standard since they couple information on two non-matching grids.

    In particular, the integral that appears in the computation of a single entry of $C$ is computed on $\Gamma$. As usual in finite elements we split this integral into contributions from all cells of the triangulation used to discretize $\Gamma$, we transform the integral on $K$ to an integral on the reference element $\hat K$, where $F_{K}$ is the mapping from $\hat K$ to $K$, and compute the integral on $\hat K$ using a quadrature formula:

    \[
     C_{\alpha j} \dealcoloneq (v_j, q_\alpha)_\Gamma  = \sum_{K\in \Gamma} \int_{\hat K}
     \hat q_\alpha(\hat x) (v_j \circ F_{K}) (\hat x) J_K (\hat x) \mathrm{d} \hat x =
     \sum_{K\in \Gamma} \sum_{i=1}^{n_q}  \big(\hat q_\alpha(\hat x_i)  (v_j \circ F_{K}) (\hat x_i) J_K (\hat x_i) w_i \big)
    \]

    Computing this sum is non-trivial because we have to evaluate $(v_j \circ F_{K})(\hat x_i)$. In general, if $\Gamma$ and $\Omega$ are not aligned, the point $F_{K}(\hat x_i)$ is completely arbitrary with respect to $\Omega$, and unless we figure out a way to interpolate all basis functions of $V_h(\Omega)$ on an arbitrary point on $\Omega$, we cannot compute the integral needed for an entry of the matrix $C$.

    To evaluate $(v_j \circ F_{K}) (\hat x_i)$, the following steps need to be taken (as shown in the picture below):

    @@ -254,13 +254,13 @@

    This is what the deal.II function VectorTools::point_value() does when evaluating a finite element field (not just a single shape function) at an arbitrary point; but this would be inefficient in this case.

    A better solution is to use a convenient wrapper to perform the first three steps on a collection of points: GridTools::compute_point_locations(). If one is actually interested in computing the full coupling matrix, then it is possible to call the method NonMatching::create_coupling_mass_matrix(), that performs the above steps in an efficient way, reusing all possible data structures, and gathering expensive steps together. This is the function we'll be using later in this tutorial.
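    A minimal sketch of that call, with placeholder names for the matrix and quadrature objects and assuming the argument order of NonMatching::create_coupling_mass_matrix (embedding objects first, embedded ones last):

      NonMatching::create_coupling_mass_matrix(
        *space_dh,          // V_h(Omega), the embedding space
        *embedded_dh,       // Q_h(Gamma), the embedded space
        QGauss<dim>(2 * embedded_fe->degree + 1), // assumed quadrature
        coupling_matrix,    // stores C^T, sized n x m
        AffineConstraints<double>(),
        ComponentMask(),
        ComponentMask(),
        StaticMappingQ1<spacedim>::mapping,
        *embedded_mapping); // accounts for the deformed configuration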

    We solve the final saddle point problem by an iterative solver, applied to the Schur complement $S$ (whose construction is described, for example, in step-20), and we construct $S$ using LinearOperator classes.
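    A minimal sketch of that construction, under the assumption that the solvers and the preconditioner for $K$ are set up elsewhere ($K$, $C$, $F$, $G$ as in the formulas above; variable names are placeholders):

      auto K     = linear_operator(stiffness_matrix);
      auto Ct    = linear_operator(coupling_matrix); // C^T
      auto C     = transpose_operator(Ct);
      auto K_inv = inverse_operator(K, solver_cg, preconditioner);

      // Schur complement of the saddle point system, and its inverse:
      auto S     = C * K_inv * Ct;
      auto S_inv = inverse_operator(S, outer_solver, PreconditionIdentity());

      // Solve for the multiplier first, then recover the primal variable.
      lambda   = S_inv * (C * K_inv * embedding_rhs - embedded_rhs);
      solution = K_inv * (embedding_rhs - Ct * lambda);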

    The testcase

    The problem we solve here is identical to step-4, with the difference that we impose some constraints on an embedded domain $\Gamma$. The tutorial is written in a dimension independent way, and in the results section we show how to vary both dim and spacedim.

    The tutorial is compiled for dim equal to one and spacedim equal to two. If you want to run the program in embedding dimension spacedim equal to three, you will most likely want to change the reference domain for $\Gamma$ to be, for example, something you read from file, or a closed sphere that you later deform to something more interesting.

    In the default scenario, $\Gamma$ has co-dimension one, and this tutorial program implements the Fictitious Boundary Method. As it turns out, the same techniques are used in the Variational Immersed Finite Element Method, and the coupling operator $C$ defined above is the same in almost all of these non-matching methods.

    The embedded domain is assumed to be included in $\Omega$, which we take as the unit square $[0,1]^2$. The definition of the fictitious domain $\Gamma$ can be modified through the parameter file, and can be given as a mapping from the reference interval $[0,1]$ to a curve in $\Omega$.

    If the curve is closed, then the results will be similar to running the same problem on a grid whose boundary is $\Gamma$. The program will happily run also with a non-closed $\Gamma$, although in those cases the mathematical formulation of the problem is more difficult, since $\Gamma$ will have a boundary by itself that has co-dimension two with respect to the domain $\Omega$.

    References

    DistributedLagrangeProblem

    In the DistributedLagrangeProblem, we need two parameters describing the dimensions of the domain $\Gamma$ (dim) and of the domain $\Omega$ (spacedim).

    These will be used to initialize a Triangulation<dim,spacedim> (for $\Gamma$) and a Triangulation<spacedim,spacedim> (for $\Omega$).

    A novelty with respect to other tutorial programs is the heavy use of std::unique_ptr. These behave like classical pointers, with the advantage of doing automatic house-keeping: the contained object is automatically destroyed as soon as the unique_ptr goes out of scope, even if it is inside a container or there's an exception. Moreover, it does not allow for duplicate pointers, which prevents ownership problems. We do this because we want to be able to i) construct the problem, ii) read the parameters, and iii) initialize all objects according to what is specified in a parameter file.

    We construct the parameters of our problem in the internal class Parameters, derived from ParameterAcceptor. The DistributedLagrangeProblem class takes a const reference to a Parameters object, so that it is not possible to modify the parameters from within the DistributedLagrangeProblem class itself.

    We could have initialized the parameters first, and then passed the parameters to the DistributedLagrangeProblem assuming all entries are set to the desired values, but this has two disadvantages:

    @@ -357,22 +357,22 @@
     

    The parameters now described can all be set externally using a parameter file: if no parameter file is present when running the executable, the program will create a "parameters.prm" file with the default values defined here, and then abort to give the user a chance to modify the parameters.prm file.

    Initial refinement for the embedding grid, corresponding to the domain $\Omega$.

      unsigned int initial_refinement = 4;
     
    The interaction between the embedded grid $\Gamma$ and the embedding grid $\Omega$ is handled through the computation of $C$, which involves all cells of $\Omega$ overlapping with parts of $\Gamma$: a higher refinement of such cells might improve the quality of our computations. For this reason we define delta_refinement: if it is greater than zero, then we mark each cell of the space grid that contains a vertex of the embedded grid and its neighbors, execute the refinement, and repeat this process delta_refinement times (a sketch of this loop follows the declaration below).

      unsigned int delta_refinement = 3;
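    A minimal sketch of that loop, assuming vertices of the embedded grid are located with GridTools::find_active_cell_around_point (the actual program works with the mapped, possibly deformed configuration, which this sketch ignores):

  for (unsigned int i = 0; i < parameters.delta_refinement; ++i)
    {
      // Flag each space cell containing a vertex of the embedded grid,
      // together with its neighbors, then refine.
      for (const auto &v : embedded_grid->get_vertices())
        {
          const auto cell =
            GridTools::find_active_cell_around_point(*space_grid, v);
          cell->set_refine_flag();
          for (const auto f : cell->face_indices())
            if (!cell->at_boundary(f))
              cell->neighbor(f)->set_refine_flag();
        }
      space_grid->execute_coarsening_and_refinement();
    }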
     

    Starting refinement of the embedded grid, corresponding to the domain $\Gamma$.

      unsigned int initial_embedded_refinement = 8;
     
    The list of boundary ids where we impose (possibly inhomogeneous) Dirichlet boundary conditions. On the remaining boundary ids (if any), we impose homogeneous Neumann boundary conditions. As a default problem we have zero Dirichlet boundary conditions on $\partial \Omega$.

      std::list<types::boundary_id> dirichlet_ids{0, 1, 2, 3};
     
    FiniteElement degree of the embedding space: $V_h(\Omega)$

      unsigned int embedding_space_finite_element_degree = 1;
     
    FiniteElement degree of the embedded space: $Q_h(\Gamma)$

      unsigned int embedded_space_finite_element_degree = 1;
     

    FiniteElement degree of the space used to describe the deformation of the embedded domain

    @@ -445,7 +445,7 @@
      std::unique_ptr<Mapping<dim, spacedim>> embedded_mapping;
     
    We do the same thing to specify the value of the forcing term $f$. In this case the Function is a scalar one.

      ParameterAcceptorProxy<Functions::ParsedFunction<spacedim>>
      embedding_rhs_function;
     
    @@ -453,8 +453,8 @@
/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html differs (HTML document, UTF-8 Unicode text, with very long lines)

@@ -154,145 +154,145 @@

    Weak Galerkin Finite Element Methods (WGFEMs) use discrete weak functions to approximate scalar unknowns, and discrete weak gradients to approximate classical gradients. The method was originally introduced by Junping Wang and Xiu Ye in the paper A weak Galerkin finite element method for second order elliptic problems, J. Comput. Appl. Math., 103-115, 2013. Compared to the continuous Galerkin method, the weak Galerkin method satisfies important physical properties, namely local mass conservation and bulk normal flux continuity. It results in an SPD linear system, and optimal convergence rates can be obtained with mesh refinement.

    The equation to solve

    This program solves the Poisson equation using the weak Galerkin finite element method:

\begin{align*}
   \nabla \cdot \left( -\mathbf{K} \nabla p \right)
     &= f,
     \qquad \mathbf{x} \in \Omega, \\
   p &=  p_D,\qquad \mathbf{x} \in \Gamma^D, \\
   \mathbf{u} \cdot \mathbf{n} &= u_N,
   \qquad \mathbf{x} \in \Gamma^N,
\end{align*}

    where $\Omega \subset \mathbb{R}^n (n=2,3)$ is a bounded domain. In the context of the flow of a fluid through a porous medium, $p$ is the pressure, $\mathbf{K}$ is a permeability tensor, $f$ is the source term, and $p_D, u_N$ represent Dirichlet and Neumann boundary conditions. We can introduce a flux, $\mathbf{u} = -\mathbf{K} \nabla p$, that corresponds to the Darcy velocity (in the way we did in step-20), and this variable will be important in the considerations below.

    In this program, we will consider a test case where the exact pressure is $p = \sin \left( \pi x\right)\sin\left(\pi y \right)$ on the unit square domain, with homogeneous Dirichlet boundary conditions and $\mathbf{K}$ the identity matrix. Then we will calculate $L_2$ errors of pressure, velocity, and flux.

    Weak Galerkin scheme

    The Poisson equation above has a solution $p$ that needs to satisfy the weak formulation of the problem,

\begin{equation*}
 \mathcal{A}\left(p,q \right) = \mathcal{F} \left(q \right),
\end{equation*}

    for all test functions $q$, where

\begin{equation*}
 \mathcal{A}\left(p,q\right)
   \dealcoloneq \int_\Omega \left(\mathbf{K} \nabla p\right) \cdot \nabla q \;\mathrm{d}x,
\end{equation*}

    and

\begin{equation*}
 \mathcal{F}\left(q\right)
   \dealcoloneq \int_\Omega f \, q \;\mathrm{d}x
   - \int_{\Gamma^N} u_N q \; \mathrm{d}x.
\end{equation*}

    Here, we have integrated by parts in the bilinear form, and we are evaluating the gradient of $p,q$ in the interior and the values of $q$ on the boundary of the domain. All of this is well defined because we assume that the solution is in $H^1$ for which taking the gradient and evaluating boundary values are valid operations.

    The idea of the weak Galerkin method is now to approximate the exact $p$ solution with a discontinuous function $p_h$. This function may only be discontinuous along interfaces between cells, and because we will want to evaluate this function also along interfaces, we have to prescribe not only what values it is supposed to have in the cell interiors but also its values along interfaces. We do this by saying that $p_h$ is actually a tuple, $p_h=(p^\circ,p^\partial)$, though it's really just a single function that is either equal to $p^\circ(x)$ or $p^\partial(x)$, depending on whether it is evaluated at a point $x$ that lies in the cell interior or on cell interfaces.

    We would then like to simply stick this approximation into the bilinear form above. This works for the case where we have to evaluate the test function $q_h$ on the boundary (where we would simply take its interface part $q_h^\partial$) but we have to be careful with the gradient because that is only defined in cell interiors. Consequently, the weak Galerkin scheme for the Poisson equation is defined by

\begin{equation*}
 \mathcal{A}_h\left(p_h,q_h \right) = \mathcal{F} \left(q_h \right),
\end{equation*}

    for all discrete test functions $q_h$, where

\begin{equation*}
 \mathcal{A}_h\left(p_h,q_h\right)
   \dealcoloneq \sum_{K \in \mathbb{T}}
     \int_K \mathbf{K} \nabla_{w,d} p_h \cdot \nabla_{w,d} q_h \;\mathrm{d}x,
\end{equation*}

    and

\begin{equation*}
 \mathcal{F}\left(q_h\right)
   \dealcoloneq \sum_{K \in \mathbb{T}} \int_K f \, q_h^\circ \;\mathrm{d}x
   - \sum_{\gamma \in \Gamma_h^N} \int_\gamma u_N q_h^\partial \;\mathrm{d}x,
\end{equation*}

    The key point is that here, we have replaced the gradient $\nabla p_h$ by the discrete weak gradient operator $\nabla_{w,d} p_h$ that makes sense for our peculiarly defined approximation $p_h$.

    The question is then how that operator works. For this, let us first say how we think of the discrete approximation $p_h$ of the pressure. As mentioned above, the "function" $p_h$ actually consists of two parts: the values $p_h^\circ$ in the interior of cells, and $p_h^\partial$ on the interfaces. We have to define discrete (finite-dimensional) function spaces for both of these; in this program, we will use FE_DGQ for $p_h^\circ$ as the space in the interior of cells (defined on each cell, but in general discontinuous along interfaces), and FE_FaceQ for $p_h^\partial$ as the space on the interfaces.

    Then let us consider just a single cell (because the integrals above are all defined cell-wise, and because the weak discrete gradient is defined cell-by-cell). The restriction of $p_h$ to cell $K$, $p_h|_K$, then consists of the pair $(p_h^\circ|_K,p_h^\partial|_{\partial K})$. In essence, we can think of $\nabla_{w,d} p_h$ as some function defined on $K$ that approximates the gradient; in particular, if $p_h|_K$ was the restriction of a differentiable function (to the interior and boundary of $K$ – which would make it continuous between the interior and boundary), then $\nabla_{w,d} p_h$ would simply be the exact gradient $\nabla p_h$. But, since $p_h|_K$ is not continuous between interior and boundary of $K$, we need a more general definition; furthermore, we cannot deal with arbitrary functions, and so require that $\nabla_{w,d} p_h$ is also in a finite element space (which, since the gradient is a vector, has to be vector-valued, and because the weak gradient is defined on each cell separately, will also be discontinuous between cells).

    The way this is done is to define this weak gradient operator $\nabla_{w,d}|_K :
DGQ_k(K) \times DGQ_r(\partial K) \rightarrow RT_s(K)$ (where $RT_s(K)$ is the vector-valued Raviart-Thomas space of order $s$ on cell $K$) in the following way:

\begin{equation*}
   \int_K \mathbf v_h \cdot (\nabla_{w,d} p_h)
   =
   -\int_K (\nabla \cdot \mathbf v_h) p_h^\circ
   +\int_{\partial K} (\mathbf v_h \cdot \mathbf n) p_h^\partial,
\end{equation*}

    for all test functions $\mathbf v_h \in RT_s(K)$. This is, in essence, simply an application of the integration-by-parts formula. In other words, for a given $p_h=(p^\circ_h,p^\partial_h)$, we need to think of $\nabla_{w,d} p_h|_K$ as that Raviart-Thomas function of degree $s$ for which the left hand side and right hand side are equal for all test functions.

    A key point to make is then the following: While the usual gradient $\nabla$ is a local operator that computes derivatives based simply on the value of a function at a point and its (infinitesimal) neighborhood, the weak discrete gradient $\nabla_{w,d}$ does not have this property: It depends on the values of the function it is applied to on the entire cell, including the cell's boundary. Both are, however, linear operators as is clear from the definition of $\nabla_{w,d}$ above, and that will allow us to represent $\nabla_{w,d}$ via a matrix in the discussion below.

    Note
    It may be worth pointing out that while the weak discrete gradient is an element of the Raviart-Thomas space $RT_s(K)$ on each cell $K$, it is discontinuous between cells. On the other hand, the Raviart-Thomas space $RT_s=RT_s({\mathbb T})$ defined on the entire mesh and implemented by the FE_RaviartThomas class represents functions that have continuous normal components at interfaces between cells. This means that globally, $\nabla_{w,d} p_h$ is not in $RT_s$, even though it is on every cell $K$ in $RT_s(K)$. Rather, it is in a "broken" Raviart-Thomas space that below we will represent by the symbol $DGRT_s$. (The term "broken" here refers to the process of "breaking something apart", and not as a synonym for "not functional".) One might therefore (rightfully) argue that the notation used in the weak Galerkin literature is a bit misleading, but as so often it all depends on the context in which a certain notation is used – in the current context, references to the Raviart-Thomas space or element are always understood to be to the "broken" spaces.
    deal.II happens to have an implementation of this broken Raviart-Thomas space: The FE_DGRaviartThomas class. As a consequence, in this tutorial we will simply always use the FE_DGRaviartThomas class, even though in all of those places where we have to compute cell-local matrices and vectors, it makes no difference.

    Representing the weak gradient

    Since $p_h$ is an element of a finite element space, we can expand it in a basis as we always do, i.e., we can write

\begin{equation*}
   p_h(\mathbf x) = \sum_j P_j \varphi_j(\mathbf x).
\end{equation*}

    Here, since $p_h$ has two components (the interior and the interface components), the same must hold true for the basis functions $\varphi_j(\mathbf x)$, which we can write as $\varphi_j = (\varphi_j^\circ,\varphi_j^\partial)$. If you've followed the descriptions in step-8, step-20, and the documentation module on vector-valued problems, it will be no surprise that for some values of $j$, $\varphi_j^\circ$ will be zero, whereas for other values of $j$, $\varphi_j^\partial$ will be zero – i.e., shape functions will be of either one or the other kind. That is not important here, however. What is important is that we need to wonder how we can represent $\nabla_{w,d} \varphi_j$ because that is clearly what will appear in the problem when we want to implement the bilinear form

\begin{equation*}
 \mathcal{A}_h\left(p_h,q_h\right)
   = \sum_{K \in \mathbb{T}}
     \int_K \mathbf{K} \nabla_{w,d} p_h \cdot \nabla_{w,d} q_h \;\mathrm{d}x,
\end{equation*}

    The key point is that $\nabla_{w,d} \varphi_j$ is known to be a member of the "broken" Raviart-Thomas space $DGRT_s$. What this means is that we can represent (on each cell $K$ separately)

\begin{equation*}
 \nabla_{w,d} \varphi_j|_K
   = \sum_k C_{jk}^K \mathbf v_k|_K
\end{equation*}

    where the functions $\mathbf v_k \in DGRT_s$, and where $C^K$ is a matrix of dimension

\begin{align*}
  \text{dim}\left(DGQ_k(K) \times DGQ_r(K)\right) &\times \text{dim}\left(RT_s(K)\right)
   \\
  &=
  \left(\text{dim}(DGQ_k(K)) + \text{dim}(DGQ_r(K))\right) \times \text{dim}\left(RT_s(K)\right).
\end{align*}

    (That the weak discrete gradient can be represented as a matrix should not come as a surprise: It is a linear operator from one finite dimensional space to another finite dimensional space. If one chooses bases for both of these spaces, then every linear operator can of course be written as a matrix mapping the vector of expansion coefficients with regard to the basis of the domain space of the operator, to the vector of expansion coefficients with regard to the basis in the image space.)

    Using this expansion, we can easily use the definition of the weak discrete gradient above to define what the matrix is going to be:

\begin{equation*}
   \int_K \mathbf v_i \cdot \left(\sum_k C_{jk}^K \mathbf v_k\right)
   =
   -\int_K (\nabla \cdot \mathbf v_i) \varphi_j^\circ
   +\int_{\partial K} (\mathbf v_i \cdot \mathbf n) \varphi_j^\partial,
\end{equation*}

    for all test functions $\mathbf v_i \in DGRT_s$.

    This clearly leads to a linear system of the form

\begin{equation*}
   \sum_k M_{ik}^K C_{jk}^K
   =
   G_{ij}^K
\end{equation*}

    with

\begin{equation*}
   M_{ik}^K = \int_K \mathbf v_i \cdot \mathbf v_k,
   \qquad\qquad
   G_{ij}^K = -\int_K (\nabla \cdot \mathbf v_i) \varphi_j^\circ
              +\int_{\partial K} (\mathbf v_i \cdot \mathbf n) \varphi_j^\partial,
\end{equation*}

    and consequently

\begin{equation*}
   \left(C^K\right)^T = \left(M^K\right)^{-1} G^K.
\end{equation*}

    (In this last step, we have assumed that the indices $i,j,k$ only range over those degrees of freedom active on cell $K$, thereby ensuring that the mass matrix on the space $RT_s(K)$ is invertible.) Equivalently, using the symmetry of the matrix $M$, we have that

\begin{equation*}
   C^K = \left(G^K\right)^{T} \left(M^K\right)^{-1}.
\end{equation*}

    Also worth pointing out is that the matrices $C^K$ and $G^K$ are of course not square but rectangular.
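
    As a concrete illustration, the cell-wise computation could be written with deal.II's FullMatrix class as follows. The names are illustrative (M for $M^K$, G for $G^K$, M_weighted for the $\mathbf{K}$-weighted Raviart-Thomas mass matrix, n_rt and n_p for the number of Raviart-Thomas and pressure basis functions on the cell); this is a sketch under those assumptions, not the tutorial's actual code:

      // C^K = (G^K)^T (M^K)^{-1}, computed cell by cell:
      FullMatrix<double> M_inv(M); // copy M^K ...
      M_inv.gauss_jordan();        // ... and invert it in place
      FullMatrix<double> C(n_p, n_rt);
      G.Tmmult(C, M_inv);          // C = G^T * M^{-1}

      // The local contribution discussed in the next section then follows
      // from the expansion above as A^K = C * M_weighted * C^T, where
      // (M_weighted)_{kl} = \int_K (K v_k) . v_l:
      FullMatrix<double> CM(n_p, n_rt);
      FullMatrix<double> A(n_p, n_p);
      C.mmult(CM, M_weighted);     // CM = C * M_weighted
      CM.mTmult(A, C);             // A  = CM * C^T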

    Assembling the linear system

    Having explained how the weak discrete gradient is defined, we can now come back to the question of how the linear system for the equation in question should be assembled. Specifically, using the definition of the bilinear form ${\cal A}_h$ shown above, we then need to compute the elements of the local contribution to the global matrix,

\begin{equation*}
   A^K_{ij} = \int_K \left({\mathbf K} \nabla_{w,d} \varphi_i\right) \cdot \nabla_{w,d} \varphi_j.
\end{equation*}

/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    Note
    As a prerequisite of this program, you need to have HDF5, complex PETSc, and the p4est libraries installed. The installation of deal.II together with these additional libraries is described in the README file.

    Introduction

    A phononic crystal is a periodic nanostructure that modifies the motion of mechanical vibrations or phonons. Phononic structures can be used to disperse, route and confine mechanical vibrations. These structures have potential applications in quantum information and have been used to study macroscopic quantum phenomena. Phononic crystals are usually fabricated in cleanrooms.

    In this tutorial we show how to design a phononic superlattice cavity, which is a particular type of phononic crystal that can be used to confine mechanical vibrations. A phononic superlattice cavity is formed by two Distributed Bragg Reflector (DBR) mirrors and a $\lambda/2$ cavity, where $\lambda$ is the acoustic wavelength. Acoustic DBRs are periodic structures where a set of bilayer stacks with contrasting physical properties (sound velocity index) is repeated $N$ times. Superlattice cavities are usually grown on a Gallium Arsenide wafer by Molecular Beam Epitaxy. The bilayers correspond to GaAs/AlAs mirror pairs. As shown below, the thickness of the mirror layers (brown and green) is $\lambda/4$ and the thickness of the cavity (blue) is $\lambda/2$.

    Phononic superlattice cavity

    In this tutorial we calculate the band gap and the mechanical resonance of a phononic superlattice cavity but the code presented here can be easily used to design and calculate other types of phononic crystals.

      The device is a waveguide in which the wave goes from left to right. The simulations of this tutorial are done in 2D, but the code is dimension independent and can be easily used with 3D simulations. The waveguide width is equal to the $y$ dimension of the domain and the waveguide length is equal to the $x$ dimension of the domain. There are two regimes that depend on the waveguide width:

      • Single mode: In this case the width of the structure is much smaller than the wavelength. This case can be solved either with FEM (the approach that we take here) or with a simple semi-analytical 1D transfer matrix formalism.
      • Multimode: In this case the width of the structure is larger than the wavelength. This case can be solved using FEM or with a scattering matrix formalism. Although we do not study this case in this tutorial, it is very easy to reach the multimode regime by increasing the parameter waveguide width (dimension_y in the jupyter notebook).

      The transmission coefficient corresponds to the energy of the first simulation divided by the calibration energy. We repeat this procedure for each frequency step.

      Elastic equations

      What we want to simulate here is the transmission of elastic waves. Consequently, the right description of the problem uses the elastic equations, which in the time domain are given by

\[
 \rho\partial_{tt} u_i - \partial_j (c_{ijkl} \varepsilon_{kl}) = f_i,
 \qquad i=0,1,2
\]

      where the stiffness tensor $c_{ijkl}$ depends on the spatial coordinates and the strain is the symmetrized gradient of the displacement, given by

\[
 \varepsilon_{kl} =\frac{1}{2}(\partial_k u_l + \partial_l u_k)
\]

      A perfectly matched layer (PML) can be used to truncate the solution at the boundaries. A PML is a transformation that results in a complex coordinate stretching.

      Instead of a time domain approach, this tutorial program converts the equations above into the frequency domain by performing a Fourier transform with regard to the time variable. The elastic equations in the frequency domain then read as follows

\begin{eqnarray*}
 \nabla\cdot(\boldsymbol{\bar\sigma} \xi \boldsymbol{\Lambda})&=&-\omega^2\rho\xi\mathbf{\bar u}\\
 \boldsymbol{\bar \sigma} &=&\mathbf{C}\boldsymbol{\bar\varepsilon}\\
 \boldsymbol{\bar\varepsilon}&=&\frac{1}{2}[(\nabla\mathbf{\bar{u}}\boldsymbol{\Lambda}+\boldsymbol{\Lambda}^\mathrm{T}(\nabla\mathbf{\bar{u}})^\mathrm{T})]\\
 \xi &=&\prod_i^\textrm{dim}s_i\\
 \boldsymbol{\Lambda} &=& \operatorname{diag}(1/s_0,1/s_1,1/s_2)\qquad\textrm{for 3D}\\
 \boldsymbol{\Lambda} &=& \operatorname{diag}(1/s_0,1/s_1)\qquad\textrm{for 2D}
\end{eqnarray*}

      where the coefficients $s_i = 1+is_i'(x,y,z)$ account for the absorption. There are 3 $s_i$ coefficients in 3D and 2 in 2D. The imaginary part of $s_i$ is equal to zero outside the PML. The PMLs are reflectionless only for the exact wave equations. When the set of equations is discretized, the PML is no longer reflectionless. The reflections can be made arbitrarily small as long as the medium is slowly varying, see the adiabatic theorem. In the code a quadratic turn-on of the PML has been used; a linear and a cubic turn-on are also known to work. These equations can be expanded into

\[
 -\omega^2\rho \xi  u_m - \partial_n \left(\frac{\xi}{s_n}c_{mnkl}
 \varepsilon_{kl}\right) = f_m
\]

\[
 \varepsilon_{kl} =\frac{1}{2}\left(\frac{1}{s_k}\partial_k u_l
 + \frac{1}{s_l}\partial_l u_k\right)
\]

      where summation over repeated indices (here $n$, as well as $k$ and $l$) is as always implied. Note that the strain is no longer symmetric after applying the complex coordinate stretching of the PML. This set of equations can be written as

\[
 -\omega^2\rho \xi  u_m - \partial_n \left(\frac{\xi c_{mnkl}}{2s_n s_k} \partial_k u_l
 + \frac{\xi c_{mnkl}}{2s_n s_l} \partial_l u_k\right) = f_m
\]

      Just like the strain, the stress tensor is not symmetric inside the PML ( $s_j\neq 0$). Indeed, the fields inside the PML are not physical. It is useful to introduce the tensors $\alpha_{mnkl}$ and $\beta_{mnkl}$.

\[
 -\omega^2\rho \xi  u_m - \partial_n \left(\alpha_{mnkl}\partial_k u_l
 +  \beta_{mnkl}\partial_l u_k\right) = f_m
\]

      We can multiply by $\varphi_m$ and integrate over the domain $\Omega$ and integrate by parts.

\begin{eqnarray*}
 -\omega^2\int_\Omega\rho\xi\varphi_m u_m + \int_\Omega\partial_n\varphi_m \left(\frac{\xi c_{mnkl}}{2s_n s_k} \partial_k u_l
 + \frac{\xi c_{mnkl}}{2s_n s_l} \partial_l u_k\right) = \int_\Omega\varphi_m f_m
\end{eqnarray*}

      It is this set of equations we want to solve for a set of frequencies $\omega$ in order to compute the transmission coefficient as function of frequency. The linear system becomes

\begin{eqnarray*}
 AU&=&F\\
 A_{ij} &=& -\omega^2\int_\Omega\rho \xi\varphi_m^i \varphi_m^j + \int_\Omega\partial_n\varphi_m^i \left(\frac{\xi c_{mnkl}}{2s_n s_k} \partial_k \varphi_l^j
 + \frac{\xi c_{mnkl}}{2s_n s_l} \partial_l \varphi_k^j\right)\\
 F_i &=& \int_\Omega\varphi_m^i f_m
\end{eqnarray*}
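
      In outline, the program then sweeps over the frequency range, rebuilding and solving this linear system once per frequency. The helper functions named in the following sketch (assemble_system, solve, store_frequency_step_data) are placeholders for the program's actual routines, not necessarily its real names:

        // Schematic frequency sweep: assemble and solve once per frequency,
        // then store the probe measurement for that frequency step.
        for (unsigned int step = 0; step < frequency.size(); ++step)
          {
            const double omega = 2 * numbers::PI * frequency[step];
            assemble_system(omega); // builds A(omega) and F from the weak form
            solve();                // solves A U = F
            store_frequency_step_data(step); // probe energy into the HDF5 file
          }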

      Simulation parameters

      In this tutorial we use a Python Jupyter notebook to set up the parameters and run the simulation. First we create an HDF5 file where we store the parameters and the results of the simulation.
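
      On the C++ side, deal.II's HDF5 wrappers can read such a file back. A minimal sketch (the file, group, and attribute names are illustrative, not necessarily those used by this tutorial):

        #include <deal.II/base/hdf5.h>

        // Open the file written by the notebook and read two attributes:
        HDF5::File file("results.h5", HDF5::File::FileAccessMode::open);
        HDF5::Group data = file.open_group("data");
        const int    n_points = data.get_attribute<int>("nb_frequency_points");
        const double max_freq = data.get_attribute<double>("maximum_frequency");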

        const Point<dim> force_center;
       
        public:
    In this particular simulation the force has only an $x$ component, $F_y=0$.

      const unsigned int force_component = 0;
      };
     

    The get_stiffness_tensor() function

    This function returns the stiffness tensor of the material. For the sake of simplicity we consider the stiffness to be isotropic and homogeneous; only the density $\rho$ depends on the position. As we have previously shown in step-8, if the stiffness is isotropic and homogeneous, the stiffness coefficients $c_{ijkl}$ can be expressed as a function of the two coefficients $\lambda$ and $\mu$. The coefficient tensor reduces to

\[
   c_{ijkl}
   =
   \lambda \delta_{ij} \delta_{kl} +
   \mu (\delta_{ik} \delta_{jl} + \delta_{il} \delta_{jk}).
\]

      template <int dim>
      SymmetricTensor<4, dim> get_stiffness_tensor(const double lambda,
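
    The snippet is truncated by the diff at this point. For orientation, a body consistent with the formula above could look like the following sketch (assuming the second, truncated parameter is $\mu$):

      template <int dim>
      SymmetricTensor<4, dim> get_stiffness_tensor(const double lambda,
                                                   const double mu)
      {
        // Build c_ijkl = lambda d_ij d_kl + mu (d_ik d_jl + d_il d_jk)
        SymmetricTensor<4, dim> stiffness_tensor;
        for (unsigned int i = 0; i < dim; ++i)
          for (unsigned int j = 0; j < dim; ++j)
            for (unsigned int k = 0; k < dim; ++k)
              for (unsigned int l = 0; l < dim; ++l)
                stiffness_tensor[i][j][k][l] =
                  lambda * ((i == j) && (k == l) ? 1 : 0) +
                  mu * (((i == k) && (j == l) ? 1 : 0) +
                        ((i == l) && (j == k) ? 1 : 0));
        return stiffness_tensor;
      }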

    This vector contains the range of frequencies that we are going to simulate.

      std::vector<double> frequency;
     
    This vector contains the coordinates $(x,y)$ of the points of the measurement probe.

      FullMatrix<double> probe_positions;
     

    HDF5 datasets to store the frequency and probe_positions vectors.

      {}
     

    This function defines the spatial shape of the force vector pulse, which takes the form of a Gaussian function

\begin{align*}
   F_x &=
   \left\{
   \begin{array}{ll}
     a \, e^{-\frac{(x-x_c)^2}{2\sigma_x^2}-\frac{(y-y_c)^2}{2\sigma_y^2}}
       & \text{if } x_\textrm{min} <x<x_\textrm{max},\;
         y_\textrm{min} <y<y_\textrm{max}  \\
     0 & \text{otherwise},
   \end{array}
   \right.\\
   F_y &= 0
\end{align*}

    where $a$ is the maximum amplitude the force takes, and $\sigma_x$ and $\sigma_y$ are the standard deviations for the $x$ and $y$ components; $(x_c,y_c)$ denotes the center of the pulse, i.e., the force_center member above. Note that the pulse has been cropped to $x_\textrm{min}<x<x_\textrm{max}$ and $y_\textrm{min} <y<y_\textrm{max}$.

      template <int dim>
      double RightHandSide<dim>::value(const Point<dim> & p,
      const unsigned int component) const
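
    The body of value() is elided here by the diff; a sketch consistent with the cropped Gaussian above (all member names other than force_component and force_center are illustrative):

      // Outside the force component or the cropping window the force is
      // zero; inside, evaluate the Gaussian centered at force_center.
      if (component != force_component)
        return 0.;
      if (p[0] < x_min || p[0] > x_max || p[1] < y_min || p[1] > y_max)
        return 0.;
      const double dx = p[0] - force_center[0];
      const double dy = p[1] - force_center[1];
      return max_force_amplitude *
             std::exp(-(dx * dx / (2 * sigma_x * sigma_x) +
                        dy * dy / (2 * sigma_y * sigma_y)));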
    The PML coefficient for the x component takes the form $s'_x = a_x x^{\textrm{degree}}$

      template <int dim>
      std::complex<double> PML<dim>::value(const Point<dim> & p,
      const unsigned int component) const
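
    Again the body is elided by the diff; a sketch of a quadratic turn-on consistent with $s'_x = a_x x^{\textrm{degree}}$ (member names such as pml_x_start, alpha, and degree are illustrative):

      // Outside the PML the coefficient is exactly 1 (no absorption);
      // inside, the imaginary part grows polynomially with the depth
      // into the layer, giving s_x = 1 + i s'_x.
      double s_prime = 0.;
      if (component == 0 && p[0] > pml_x_start)
        s_prime = alpha * std::pow(p[0] - pml_x_start, degree);
      return std::complex<double>(1., s_prime);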

    The Rho class implementation

    This class is used to define the mass density. As we have explained before, a phononic superlattice cavity is formed by two Distributed Bragg Reflector (DBR) mirrors and a $\lambda/2$ cavity, where $\lambda$ is the acoustic wavelength. Acoustic DBRs are periodic structures where a set of bilayer stacks with contrasting physical properties (sound velocity index) is repeated $N$ times. The change in the wave velocity is generated by alternating layers with different density.

      template <int dim>
      Rho<dim>::Rho(HDF5::Group &data)
      : Function<dim>(1)
      {

    The speed of sound is defined by

\[
   c = \frac{K_e}{\rho}
\]

    where $K_e$ is the effective elastic constant and $\rho$ the density. Here we consider the case in which the waveguide width is much smaller than the wavelength. In this case it can be shown that for the two dimensional case

\[
   K_e = 4\mu\frac{\lambda +\mu}{\lambda+2\mu}
\]

    and for the three dimensional case $K_e$ is equal to Young's modulus.
/usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html differs (HTML document, UTF-8 Unicode text, with very long lines)
 This program solves an advection-diffusion problem using a geometric multigrid (GMG) preconditioner. The basics of this preconditioner are discussed in step-16; here we discuss the necessary changes needed for a non-symmetric PDE. Additionally, we introduce the idea of block smoothing (as compared to point smoothing in step-16), and examine the effects of DoF renumbering for additive and multiplicative smoothers.

    Equation

    The advection-diffusion equation is given by

\begin{align*}
 -\varepsilon \Delta u + \boldsymbol{\beta}\cdot \nabla u & = f &
 \text{ in } \Omega\\
 u &= g & \text{ on } \partial\Omega
\end{align*}

    where $\varepsilon>0$, $\boldsymbol{\beta}$ is the advection direction, and $f$ is a source. A few notes:

    1. If $\boldsymbol{\beta}=\boldsymbol{0}$, this is the Laplace equation solved in step-16 (and many other places).
    2. If $\varepsilon=0$ then this is the stationary advection equation solved in step-9.
    3. One can define a dimensionless number for this problem, called the Peclet number: $\mathcal{P} \dealcoloneq \frac{\|\boldsymbol{\beta}\| L}{\varepsilon}$, where $L$ is the length scale of the domain. It characterizes the kind of equation we are considering: If $\mathcal{P}>1$, we say the problem is advection-dominated, else if $\mathcal{P}<1$ we will say the problem is diffusion-dominated.
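
    As a quick sanity check with the values used in the test problem below ($\varepsilon=0.005$, $\|\boldsymbol{\beta}\|=1$, and a domain of side length $L=2$), one gets $\mathcal{P} = \frac{1 \cdot 2}{0.005} = 400 \gg 1$, so that problem is firmly advection-dominated.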

    For the discussion in this tutorial we will be concerned with advection-dominated flow. This is the complicated case: We know that for diffusion-dominated problems, the standard Galerkin method works just fine, and we also know that simple multigrid methods such as those defined in step-16 are very efficient. On the other hand, for advection-dominated problems, the standard Galerkin approach leads to oscillatory and unstable discretizations, and simple solvers are often not very efficient. This tutorial program is therefore intended to address both of these issues.

    Streamline diffusion

    Using the standard Galerkin finite element method, for suitable test functions $v_h$, a discrete weak form of the PDE would read

\begin{align*}
 a(u_h,v_h) = F(v_h)
\end{align*}

    where

\begin{align*}
 a(u_h,v_h) &= (\varepsilon \nabla v_h,\, \nabla u_h) +
 (v_h,\,\boldsymbol{\beta}\cdot \nabla u_h),\\
 F(v_h) &= (v_h,\,f).
\end{align*}

    Unfortunately, one typically gets oscillatory solutions with this approach. Indeed, the following error estimate can be shown for this formulation:

\begin{align*}
 \|\nabla (u-u_h)\| \leq (1+\mathcal{P}) \inf_{v_h} \|\nabla (u-v_h)\|.
\end{align*}

    The infimum on the right can be estimated as follows if the exact solution is sufficiently smooth:

\begin{align*}
   \inf_{v_h} \|\nabla (u-v_h)\|
   \le
   \|\nabla (u-I_h u)\|
   \le
   h^k
   C
   \|\nabla^k u\|
\end{align*}

    where $k$ is the polynomial degree of the finite elements used. As a consequence, we obtain the estimate

\begin{align*}
 \|\nabla (u-u_h)\|
 \leq (1+\mathcal{P}) C h^k
   \|\nabla^k u\|.
\end{align*}

    In other words, the numerical solution will converge. On the other hand, given the definition of $\mathcal{P}$ above, we have to expect poor numerical solutions with a large error when $\varepsilon \ll \|\boldsymbol{\beta}\| L$, i.e., if the problem has only a small amount of diffusion.

    To combat this, we will consider the new weak form

\begin{align*}
 a(u_h,\,v_h) + \sum_K (-\varepsilon \Delta u_h +
 \boldsymbol{\beta}\cdot \nabla u_h-f,\,\delta_K
 \boldsymbol{\beta}\cdot \nabla v_h)_K = F(v_h)
\end{align*}

    where the sum is done over all cells $K$ with the inner product taken for each cell, and $\delta_K$ is a cell-wise constant stabilization parameter defined in [john2006discontinuity].

    Essentially, adding in the discrete strong form residual enhances the coercivity of the bilinear form $a(\cdot,\cdot)$ which increases the stability of the discrete solution. This method is commonly referred to as streamline diffusion or SUPG (streamline upwind/Petrov-Galerkin).

    Smoothers

    One of the goals of this tutorial is to expand from using a simple (point-wise) Gauss-Seidel (SOR) smoother that is used in step-16 (class PreconditionSOR) on each level of the multigrid hierarchy. The term "point-wise" is traditionally used in solvers to indicate that one solves at one "grid point" at a time; for scalar problems, this means to use a solver that updates one unknown of the linear system at a time, keeping all of the others fixed; one would then iterate over all unknowns in the problem and, once done, start over again from the first unknown until these "sweeps" converge. Jacobi, Gauss-Seidel, and SOR iterations can all be interpreted in this way. In the context of multigrid, one does not think of these methods as "solvers", but as "smoothers". As such, one is not interested in actually solving the linear system. It is enough to remove the high-frequency part of the residual for the multigrid method to work, because that allows restricting the solution to a coarser mesh. Therefore, one only does a small, fixed number of "sweeps" over all unknowns. In the code in this tutorial this is controlled by the "Smoothing steps" parameter.

    But these methods are known to converge rather slowly when used as solvers. While as multigrid smoothers, they are surprisingly good, they can also be improved upon. In particular, we consider "cell-based" smoothers here as well. These methods solve for all unknowns on a cell at once, keeping all other unknowns fixed; they then move on to the next cell, and so on and so forth. One can think of them as "block" versions of Jacobi, Gauss-Seidel, or SOR, but because degrees of freedom are shared among multiple cells, these blocks overlap and the methods are in fact best explained within the framework of additive and multiplicative Schwarz methods.

    In contrast to step-16, our test problem contains an advective term. Especially with a small diffusion constant $\varepsilon$, information is transported along streamlines in the given advection direction. This means that smoothers are likely to be more effective if they allow information to travel in downstream direction within a single smoother application. If we want to solve one unknown (or block of unknowns) at a time in the order in which these unknowns (or blocks) are enumerated, then this information propagation property requires reordering degrees of freedom or cells (for the cell-based smoothers) accordingly so that the ones further upstream are treated earlier (have lower indices) and those further downstream are treated later (have larger indices). The influence of the ordering will be visible in the results section.

    Let us now briefly define the smoothers used in this tutorial. For a more detailed introduction, we refer to [KanschatNotesIterative] and the books [smith2004domain] and [toselli2006domain]. A Schwarz preconditioner requires a decomposition

\begin{align*}
 V = \sum_{j=1}^J V_j
\end{align*}

    of our finite element space $V$. Each subproblem $V_j$ also has a Ritz projection $P_j: V \rightarrow V_j$ based on the bilinear form $a(\cdot,\cdot)$. This projection induces a local operator $A_j$ for each subproblem $V_j$. If $\Pi_j:V\rightarrow V_j$ is the orthogonal projector onto $V_j$, one can show $A_jP_j=\Pi_j^TA$.

    With this we can define an additive Schwarz preconditioner for the operator $A$ as

\begin{align*}
  B^{-1} = \sum_{j=1}^J P_j A^{-1} = \sum_{j=1}^J A_j^{-1} \Pi_j^T.
\end{align*}

    In other words, we project our solution into each subproblem, apply the inverse of the subproblem $A_j$, and sum the contributions up over all $j$.

    Note that one can interpret the point-wise (one unknown at a time) Jacobi method as an additive Schwarz method by defining a subproblem $V_j$ for each degree of freedom. Then, $A_j^{-1}$ becomes a multiplication with the inverse of a diagonal entry of $A$.

    For the "Block Jacobi" method used in this tutorial, we define a subproblem $V_j$ for each cell of the mesh on the current level. Note that we use a continuous finite element, so these blocks are overlapping, as degrees of freedom on an interface between two cells belong to both subproblems. The logic for the Schwarz operator operating on the subproblems (in deal.II they are called "blocks") is implemented in the class RelaxationBlock. The "Block Jacobi" method is implemented in the class RelaxationBlockJacobi. Many aspects of the class (for example how the blocks are defined and how to invert the local subproblems $A_j$) can be configured in the smoother data, see RelaxationBlock::AdditionalData and DoFTools::make_cell_patches() for details; a minimal configuration sketch follows below.

    So far, we discussed additive smoothers where the updates can be applied independently and there is no information flowing within a single smoother application. A multiplicative Schwarz preconditioner addresses this and is defined by

\begin{align*}
  B^{-1} = \left( I- \prod_{j=1}^J \left(I-P_j\right) \right) A^{-1}.
\end{align*}

    In contrast to above, the updates on the subproblems $V_j$ are applied sequentially. This means that the update obtained when inverting the subproblem $A_j$ is immediately used in $A_{j+1}$. This becomes visible when writing out the product:

\begin{align*}
  B^{-1}
  =
  \left(
    I - \left(I-P_1\right)\left(I-P_2\right)\cdots\left(I-P_J\right)
  \right)
  A^{-1}
  =
  A^{-1}
  -
    \left[ \left(I-P_1\right)
    \left[ \left(I-P_2\right)\cdots
      \left[\left(I-P_J\right) A^{-1}\right] \cdots \right] \right]
\end{align*}

    When defining the sub-spaces $V_j$ as whole blocks of degrees of freedom, this method is implemented in the class RelaxationBlockSOR and used when you select "Block SOR" in this tutorial. The class RelaxationBlockSOR is also derived from RelaxationBlock. As such, both additive and multiplicative Schwarz methods are implemented in a unified framework.

    Finally, let us note that the standard Gauss-Seidel (or SOR) method can be seen as a multiplicative Schwarz method with a subproblem for each DoF.

    Test problem

    We will be considering the following test problem: $\Omega = [-1,\,1]\times[-1,\,1]\backslash B_{0.3}(0)$, i.e., a square with a circle of radius 0.3 centered at the origin removed. In addition, we use $\varepsilon=0.005$, $\boldsymbol{\beta} = [-\sin(\pi/6),\,\cos(\pi/6)]$, $f=0$, and Dirichlet boundary values

\begin{align*}
 g = \left\{\begin{array}{ll} 1 & \text{if } x=-1 \text{ or } y=-1,\,x\geq 0.5 \\
 0 & \text{otherwise} \end{array}\right.
\end{align*}

    The following figures depict the solutions with (left) and without (right) streamline diffusion. Without streamline diffusion we see large oscillations around the boundary layer, demonstrating the instability of the standard Galerkin finite element method for this problem.

      Assert(component == 0, ExcIndexRange(component, 0, 1));
      (void)component;
     
    -

    Set boundary to 1 if $x=1$, or if $x>0.5$ and $y=-1$.


      if (std::fabs(p[0] - 1) < 1e-8 ||
      (std::fabs(p[1] + 1) < 1e-8 && p[0] >= 0.5))
      {
      right_hand_side.value_list(scratch_data.fe_values.get_quadrature_points(),
      rhs_values);
     

    If we are using streamline diffusion we must add its contribution to both the cell matrix and the cell right-hand side. If we are not using streamline diffusion, setting $\delta=0$ negates this contribution below and we are left with the standard, Galerkin finite element assembly.


      const double delta = (settings.with_streamline_diffusion ?
      compute_stabilization_delta(cell->diameter(),
      settings.epsilon,

    If $(i,j)$ is an interface_out dof pair, then $(j,i)$ is an interface_in dof pair. Note: For interface_in, we load the transpose of the interface entries, i.e., the entry for dof pair $(j,i)$ is stored in interface_in(i,j). This is an optimization for the symmetric case which allows only one matrix to be used when setting the edge_matrices in solve(). Here, however, since our problem is non-symmetric, we must store both interface_in and interface_out matrices.


      for (unsigned int i = 0; i < copy_data.dofs_per_cell; ++i)
      for (unsigned int j = 0; j < copy_data.dofs_per_cell; ++j)
      if (mg_constrained_dofs.is_interface_matrix_entry(
      }

    Results

    GMRES Iteration Numbers


    The major advantage for GMG is that it is an $\mathcal{O}(n)$ method, that is, the complexity of the problem increases linearly with the problem size. To show then that the linear solver presented in this tutorial is in fact $\mathcal{O}(n)$, all one needs to do is show that the iteration counts for the GMRES solve stay roughly constant as we refine the mesh.


    Each of the following tables gives the GMRES iteration counts to reduce the initial residual by a factor of $10^8$. We selected a sufficient number of smoothing steps (based on the method) to get iteration numbers independent of mesh size. As can be seen from the tables below, the method is indeed $\mathcal{O}(n)$.


    DoF/Cell Renumbering

    The point-wise smoothers ("Jacobi" and "SOR") get applied in the order the DoFs are numbered on each level. We can influence this using the DoFRenumbering namespace. The block smoothers are applied based on the ordering we set in setup_smoother(). We can visualize this numbering. The following pictures show the cell numbering of the active cells in downstream, random, and upstream numbering (left to right):

    131072 132096 12 16 19 11 12 21
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    \begin{eqnarray*}
  -\nabla \cdot \nabla u + a(\mathbf x) u &=& 1,\\
  u &=& 0 \quad \text{on } \partial \Omega
\end{eqnarray*}

    where $a(\mathbf x)$ is a variable coefficient.


    We choose as domain $\Omega=[0,1]^3$ and $a(\mathbf x)=\frac{10}{0.05 +
 2\|\mathbf x\|^2}$. Since the coefficient is symmetric around the origin but the domain is not, we will end up with a non-symmetric solution.
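
    For illustration, this coefficient could be written as the following small helper. This is a hedged sketch only: the tutorial's real implementation evaluates the coefficient on the device, and the function name is an assumption.

  template <int dim>
  double coefficient_value(const Point<dim> &p)
  {
    // a(x) = 10 / (0.05 + 2 |x|^2), as defined above
    return 10.0 / (0.05 + 2.0 * p.square());
  }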

    If you've made it this far into the tutorial, you will know what the weak formulation of this problem looks like and how, in principle, one assembles linear systems for it. Of course, in this program we will in fact not actually form the matrix, but rather only represent its action when one multiplies with it.

    Moving data to and from the device

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    \begin{align*}
 (x,y) = (1-\xi)(1-\eta) (x_0,y_0) + \xi(1-\eta) (x_1,y_1) +
         (1-\xi)\eta (x_2,y_2) + \xi\eta (x_3,y_3).
\end{align*}


    For the case of the curved surface, we want to modify this formula. For the top cell of the coarse mesh of the disk, we can assume that the points $(x_0,y_0)$ and $(x_1,y_1)$ sit along the straight line at the lower end and the points $(x_2,y_2)$ and $(x_3,y_3)$ are connected by a quarter circle along the top. We would then map a point $(\xi, \eta)$ as


    \begin{align*}
 (x,y) = (1-\eta) \big[(1-\xi) (x_0,y_0) + \xi (x_1,y_1)\big] +
       \eta \mathbf{c}_3(\xi),
 \end{align*}


    where $\mathbf{c}_3(\xi)$ is a curve that describes the $(x,y)$ coordinates of the quarter circle in terms of an arclength parameter $\xi\in (0,1)$. This represents a linear interpolation between the straight lower edge and the curved upper edge of the cell, and is the basis for the picture shown above.

    This formula is easily generalized to the case where all four edges are described by a curve rather than a straight line. We call the four functions, parameterized by a single coordinate $\xi$ or $\eta$ in the horizontal and vertical directions, $\mathbf{c}_0, \mathbf{c}_1, \mathbf{c}_2,
 \mathbf{c}_3$ for the left, right, lower, and upper edge of a quadrilateral, respectively. The interpolation then reads

    \begin{align*}
 (x,y) =& (1-\xi)\mathbf{c}_0(\eta) + \xi \mathbf{c}_1(\eta)
        + (1-\eta)\mathbf{c}_2(\xi) + \eta \mathbf{c}_3(\xi) \\
       &- \big[(1-\xi)(1-\eta) (x_0,y_0) + \xi(1-\eta) (x_1,y_1)
        + (1-\xi)\eta (x_2,y_2) + \xi\eta (x_3,y_3)\big],
\end{align*}

    where the curves match the vertices at their end points, e.g., $\mathbf{c}_0(0)
 = (x_0,y_0)$ or $\mathbf{c}_0(1) = (x_2,y_2)$. The subtraction of the bilinear interpolation in the second line of the formula makes sure that the prescribed curves are followed exactly on the boundary: Along each of the four edges, we need to subtract the contribution of the two adjacent edges evaluated in the corners, which is then simply a vertex position. It is easy to check that the formula for the circle above is reproduced if three of the four curves $\mathbf{c}_i$ are straight and thus coincide with the bilinear interpolation.
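
    In code, the two-dimensional formula can be written compactly. The following is a hedged sketch for illustration only: the function name transfinite_blend and the std::function parameters are assumptions, and this is not the interface of TransfiniteInterpolationManifold.

  #include <deal.II/base/point.h>
  #include <functional>

  using namespace dealii;

  // Blend four boundary curves c0 (left), c1 (right), c2 (bottom), c3 (top),
  // each parameterized over [0,1], into an interior point at (xi, eta).
  Point<2> transfinite_blend(const double xi, const double eta,
                             const std::function<Point<2>(double)> &c0,
                             const std::function<Point<2>(double)> &c1,
                             const std::function<Point<2>(double)> &c2,
                             const std::function<Point<2>(double)> &c3)
  {
    // Vertices are the curve end points, e.g. c0(0) = (x0,y0), c0(1) = (x2,y2).
    const Point<2> v0 = c0(0.), v1 = c1(0.), v2 = c0(1.), v3 = c1(1.);
    const Tensor<1, 2> p =
      (1 - xi) * c0(eta) + xi * c1(eta) + (1 - eta) * c2(xi) + eta * c3(xi)
      - ((1 - xi) * (1 - eta) * v0 + xi * (1 - eta) * v1
         + (1 - xi) * eta * v2 + xi * eta * v3);
    return Point<2>(p);
  }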

    This formula, called transfinite interpolation, was introduced in 1973 by Gordon and Hall. Even though transfinite interpolation essentially only represents a linear blending of the bounding curves, the interpolation exactly follows the boundary curves for each real number $\xi\in (0,1)$ or $\eta\in (0,1)$, i.e., it interpolates in an infinite number of points, which was the original motivation to label this variant of interpolation a transfinite one by Gordon and Hall. Another interpretation is that the transfinite interpolation interpolates from the left and right and the top and bottom linearly, from which we need to subtract the bilinear interpolation to ensure a unit weight in the interior of the domain.

    The transfinite interpolation is easily generalized to three spatial dimensions. In that case, the interpolation allows blending 6 different surface descriptions for any of the quads of a three-dimensional cell and 12 edge descriptions for the lines of a cell. Again, to ensure a consistent map, it is necessary to subtract the contribution of edges and add the contribution of vertices again to make the curves follow the prescribed surface or edge description. In the three-dimensional case, it is also possible to use a transfinite interpolation from a curved edge both into the adjacent faces and the adjacent cells.


    The implementation of the transfinite interpolation in deal.II is general in the sense that it can deal with arbitrary curves. It will evaluate the curves in terms of their original coordinates of the $d$-dimensional space but with one (or two, in the case of edges in 3D) coordinate held fixed at $0$ or $1$ to ensure that any other manifold class, including CAD files if desired, can be applied out of the box. Transfinite interpolation is a standard ingredient in mesh generators, so the main strength of the integration of this feature within the deal.II library is to enable it during adaptive refinement and coarsening of the mesh, and for creating higher-degree mappings that use manifolds to insert additional points beyond the mesh vertices.


    As a final remark on transfinite interpolation, we mention that the mesh refinement strategies in deal.II in absence of a volume manifold description are also based on the weights of the transfinite interpolation and optimal in that sense. The difference is that the default algorithm sees only one cell at a time, and so will apply the optimal algorithm only on those cells touching the curved manifolds. In contrast, using the transfinite mapping on entire patches of cells (originating from one coarser cell) makes it possible to use the transfinite interpolation method in a way that propagates information from the boundary to cells far away.

    Transfinite interpolation is expensive and how to deal with it

    A mesh with a transfinite manifold description is typically set up in two steps. The first step is to create a coarse mesh (or read it in from a file) and to attach a curved manifold to some of the mesh entities. For the above example of the disk, we attach a polar manifold to the faces along the outer circle (this is done automatically by GridGenerator::hyper_ball()). Before we start refining the mesh, we then assign a TransfiniteInterpolationManifold to all interior cells and edges of the mesh, which of course needs to be based on some manifold id that we have assigned to those entities (everything except the circle on the boundary). It does not matter whether we also assign a TransfiniteInterpolationManifold to the inner square of the disk or not because the transfinite interpolation on a coarse cell with straight edges (or flat faces in 3d) simply yields subdivided children with straight edges (flat faces).


    Later, when the mesh is refined or when a higher-order mapping is set up based on this mesh, the cells will query the underlying manifold object for new points. This process takes a set of surrounding points, for example the four vertices of a two-dimensional cell, and a set of weights for each of these points, from which a new point is defined. For the midpoint of a cell, each of the four vertices would get weight 0.25. For the transfinite interpolation manifold, the process of building weighted sums requires some serious work. By construction, we want to combine the points in terms of the reference coordinates $\xi$ and $\eta$ (or $\xi, \eta, \zeta$ in 3D) of the surrounding points. However, the interface of the manifold classes in deal.II does not get the reference coordinates of the surrounding points (as they are not stored globally) but rather the physical coordinates only. Thus, the first step the transfinite interpolation manifold has to do is to invert the mapping and find the reference coordinates within one of the coarse cells of the transfinite interpolation (e.g. one of the four shaded coarse-grid cells of the disk mesh above). This inversion is done by a Newton iteration (or rather, a finite-difference based Newton scheme combined with Broyden's method) and queries the transfinite interpolation according to the formula above several times. Each of these queries in turn might call an expensive manifold, e.g. a spherical description of a ball, and be expensive on its own. Since the Manifold interface class of deal.II only provides a set of points, the transfinite interpolation initially does not even know which coarse grid cell the set of surrounding points belongs to and needs to search among several cells based on some heuristics. In terms of charts, one could describe the implementation of the transfinite interpolation as an atlas-based implementation: Each cell of the initial coarse grid of the triangulation represents a chart with its own reference space, and the surrounding manifolds provide a way to transform from the chart space (i.e., the reference cell) to the physical space. The collection of the charts of the coarse grid cells is an atlas, and as usual, the first thing one does when looking up something in an atlas is to find the right chart.


    Once the reference coordinates of the surrounding points have been found, a new point in the reference coordinate system is computed by a simple weighted sum. Finally, the reference point is inserted into the formula for the transfinite interpolation, which gives the desired new point.

    In a number of cases, the curved manifold is not only used during mesh refinement, but also to ensure a curved representation of boundaries within the cells of the computational domain. This is a necessity to guarantee high-order convergence for high-order polynomials on complex geometries anyway, but sometimes an accurate geometry is also desired with linear shape functions. This is often done by polynomial descriptions of the cells and called the isoparametric concept if the polynomial degree to represent the curved mesh elements is the same as the degree of the polynomials for the numerical solution. If the degree of the geometry is higher or lower than the solution, one calls that a super- or sub-parametric geometry representation, respectively. In deal.II, the standard class for polynomial representation is MappingQ. If, for example, this class is used with polynomial degree $4$ in 3D, a total of 125 (i.e., $(4+1)^3$) points are needed for the interpolation. Among these points, 8 are the cell's vertices and already available from the mesh, but the other 117 need to be provided by the manifold. In case the transfinite interpolation manifold is used, we can imagine that going through the pull-back into reference coordinates of some yet to be determined coarse cell, followed by subsequent push-forward on each of the 117 points, is a lot of work and can be very time consuming.

    What makes things worse is that the structure of many programs is such that the mapping is queried several times independently for the same cell. Its primary use is in the assembly of the linear system, i.e., the computation of the system matrix and the right hand side, via the mapping argument of the FEValues object. However, also the interpolation of boundary values, the computation of numerical errors, writing the output, and evaluation of error estimators must involve the same mapping to ensure a consistent interpretation of the solution vectors. Thus, even a linear stationary problem that is solved once will evaluate the points of the mapping several times. For the cubic case in 3D mentioned above, this means computing 117 points per cell by an expensive algorithm many times. The situation is more pressing for nonlinear or time-dependent problems where those operations are done over and over again.
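
    One way to avoid paying this cost repeatedly is to compute the mapping points once and cache them. As a hedged sketch (the set-up details here are assumptions, not the tutorial's exact code), deal.II's MappingQCache class can be used like this:

  // Evaluate the expensive transfinite manifold once per cell and cache the
  // resulting mapping support points for all later FEValues objects.
  MappingQ<dim>      mapping(4);
  MappingQCache<dim> mapping_cache(4);
  mapping_cache.initialize(mapping, triangulation);
  // Subsequent FEValues objects can use 'mapping_cache' in place of 'mapping'
  // without re-evaluating the transfinite interpolation.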


    Grid creation and initialization of the manifolds


    The next function presents the typical usage of TransfiniteInterpolationManifold. The first step is to create the desired grid, which can be done by composition of two grids from GridGenerator. The inner ball mesh is simple enough: We run GridGenerator::hyper_ball() centered at the origin with radius 0.5 (third function argument). The second mesh is more interesting and constructed as follows: We want to have a mesh that is spherical in the interior but flat on the outer surface. Furthermore, the mesh topology of the inner ball should be compatible with the outer grid in the sense that their vertices coincide so as to allow the two grids to be merged. The grid coming out of GridGenerator::hyper_shell fulfills the requirements on the inner side in case it is created with $2d$ coarse cells (6 coarse cells in 3d which we are going to use) – this is the same number of cells as there are boundary faces for the ball. For the outer surface, we use the fact that the 6 faces on the surface of the shell without a manifold attached would degenerate to the surface of a cube. What we are still missing is the radius of the outer shell boundary. Since we desire a cube of extent $[-1, 1]$ and the 6-cell shell puts its 8 outer vertices at the 8 opposing diagonals, we must translate the points $(\pm 1, \pm 1, \pm 1)$ into a radius: Clearly, the radius must be $\sqrt{d}$ in $d$ dimensions, i.e., $\sqrt{3}$ for the three-dimensional case we want to consider.


    Thus, we have a plan: After creating the inner triangulation for the ball and the one for the outer shell, we merge those two grids but remove all manifolds that the functions in GridGenerator may have set from the resulting triangulation, to ensure that we have full control over manifolds. In particular, we want additional points added on the boundary during refinement to follow a flat manifold description. To start the process of adding more appropriate manifold ids, we assign the manifold id 0 to all mesh entities (cells, faces, lines), which will later be associated with the TransfiniteInterpolationManifold. Then, we must identify the faces and lines that are along the sphere of radius 0.5 and mark them with a different manifold id, so as to then assign a SphericalManifold to those. We will choose the manifold id of 1. Since we have thrown away all manifolds that pre-existed after calling GridGenerator::hyper_ball(), we manually go through the cells of the mesh and all their faces. We have found a face on the sphere if all four vertices have a radius of 0.5, or, as we write in the program, have $r^2-0.25 \approx 0$. Note that we call cell->face(f)->set_all_manifold_ids(1) to set the manifold id both on the faces and the surrounding lines. Furthermore, we want to distinguish the cells inside the ball and outside the ball by a material id for visualization, corresponding to the picture in the introduction.

      template <int dim>
      void PoissonProblem<dim>::create_grid()
    $\sum_{k=1}^d\text{det}(J) w_q a(x)\frac{\partial \varphi_i(\boldsymbol \xi_q)}{\partial x_k} \frac{\partial \varphi_j(\boldsymbol \xi_q)}{\partial x_k}$, which is exactly the terms needed for the bilinear form of the Laplace equation.


    The reason for choosing this somewhat unusual scheme is due to the heavy work involved in computing the cell matrix for a relatively high polynomial degree in 3d. As we want to highlight the cost of the mapping in this tutorial program, we better do the assembly in an optimized way in order to not chase bottlenecks that have been solved by the community already. Matrix-matrix multiplication is one of the best optimized kernels in the HPC context, and the FullMatrix::mTmult() function will call into those optimized BLAS functions. If the user has provided a good BLAS library when configuring deal.II (like OpenBLAS or Intel's MKL), the computation of the cell matrix will execute close to the processor's peak arithmetic performance. As a side note, we mention that despite an optimized matrix-matrix multiplication, the current strategy is sub-optimal in terms of complexity as the work to be done is proportional to $(p+1)^9$ operations for degree $p$ (this also applies to the usual evaluation with FEValues). One could compute the cell matrix with $\mathcal O((p+1)^7)$ operations by utilizing the tensor product structure of the shape functions, as is done by the matrix-free framework in deal.II. We refer to step-37 and the documentation of the tensor-product-aware evaluators FEEvaluation for details on how an even more efficient cell matrix computation could be realized.


      template <int dim>
      void PoissonProblem<dim>::assemble_system(const Mapping<dim> &mapping)
      {
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    Introduction

    The aim of this tutorial program is to demonstrate how to solve a nonlinear problem using Newton's method within the matrix-free framework. This tutorial combines several techniques already introduced in step-15, step-16, step-37, step-48 and others.

    Problem formulation


    On the unit circle $\Omega = \bigl\{ x \in \mathbb{R}^2 : \|x\| \leq 1 \bigr\}$ we consider the following nonlinear elliptic boundary value problem subject to a homogeneous Dirichlet boundary condition: Find a function $u\colon\Omega\to\mathbb{R}$ such that:

    \begin{align*}
     - \Delta u &= \exp(u) & \quad & \text{in } \Omega,\\
              u &= 0       & \quad & \text{on } \partial\Omega.
\end{align*}

    This problem is also called the Gelfand problem and is a typical example for problems from combustion theory, see for example [bebernes1989mathematical].

    Discretization with finite elements


    As usual, we first derive the weak formulation for this problem by multiplying with a smooth test function $v\colon\Omega\to\mathbb{R}$ respecting the boundary condition and integrating over the domain $\Omega$. Integration by parts and putting the term from the right hand side to the left yields the weak formulation: Find a function $u\colon\Omega\to\mathbb{R}$ such that for all test functions $v$:

    \begin{align*}
  \int_\Omega \nabla v \cdot \nabla u \,\mathrm{d}x
  -
  \int_\Omega v \exp(u) \,\mathrm{d}x
  =
  0.
\end{align*}

    Choosing the Lagrangian finite element space $V_h \dealcoloneq
 \bigl\{ v \in C(\overline{\Omega}) : v|_Q \in \mathbb{Q}_p \text{ for all }
 Q \in \mathcal{T}_h \bigr\} \cap H_0^1(\Omega)$, which directly incorporates the homogeneous Dirichlet boundary condition, we can define a basis $\{\varphi_i\}_{i=1,\dots,N}$, and thus it suffices to test only with those basis functions. So the discrete problem reads as follows: Find $u_h\in V_h$ such that for all $i=1,\dots,N$:

    \begin{align*}
  F(u_h)
  \dealcoloneq
  \int_\Omega \nabla \varphi_i \cdot \nabla u_h \,\mathrm{d}x
  -
  \int_\Omega \varphi_i \exp(u_h) \,\mathrm{d}x \stackrel{!}{=} 0.
\end{align*}

    As each finite element function is a linear combination of the basis functions $\{\varphi_i\}_{i=1,\dots,N}$, we can identify the finite element solution by a vector from $\mathbb{R}^N$ consisting of the unknown values in each degree of freedom (DOF). Thus, we define the nonlinear function $F\colon\mathbb{R}^N\to\mathbb{R}^N$ representing the discrete nonlinear problem.

    To solve this nonlinear problem we use Newton's method. Given an initial guess $u_h^0\in V_h$ that already fulfills the Dirichlet boundary condition, we determine a sequence of Newton steps $\bigl( u_h^n \bigr)_n$ by successively applying the following scheme:

    \begin{align*}
  &\text{Solve for } s_h^n\in V_h: \quad & F'(u_h^n)[s_h^n] &= -F(u_h^n),\\
  &\text{Update: }                       & u_h^{n+1} &= u_h^n + s_h^n.
\end{align*}

    So in each Newton step we have to solve a linear problem $A\,x = b$, where the system matrix $A$ is represented by the Jacobian $F'(u_h^n)[\,\cdot\,]\colon\mathbb{R}^N\to\mathbb{R}^N$ and the right hand side $b$ by the negative residual $-F(u_h^n)$. The solution vector $x$ is in that case the Newton update of the $n$-th Newton step. Note that we assume an initial guess $u_h^0$ which already fulfills the Dirichlet boundary conditions of the problem formulation (in fact this could also be an inhomogeneous Dirichlet boundary condition), and thus the Newton updates $s_h$ satisfy a homogeneous Dirichlet condition.

    So far we have only tested with the basis functions; however, we can also represent any function of $V_h$ as a linear combination of basis functions. More mathematically this means that every element of $V_h$ can be identified with a vector $U\in\mathbb{R}^N$ via the representation formula: $u_h = \sum_{i=1}^N U_i \varphi_i$. So using this we can give an expression for the discrete Jacobian and the residual:

    \begin{align*}
  A_{ij} = \bigl( F'(u_h^n) \bigr)_{ij}
  &=
  \int_\Omega \nabla\varphi_i \cdot \nabla \varphi_j \,\mathrm{d} x
  -
  \int_\Omega \varphi_i \, \exp( u_h^n ) \varphi_j \,\mathrm{d} x,\\
  b_{i} = \bigl( F(u_h^n) \bigr)_{i}
  &=
  \int_\Omega \nabla\varphi_i \cdot \nabla u_h^n \,\mathrm{d} x
  -
  \int_\Omega \varphi_i \, \exp( u_h^n ) \,\mathrm{d} x.
\end{align*}

    Compared to step-15 we could also have formed the Fréchet derivative of the nonlinear function corresponding to the strong formulation of the problem and discretized it afterwards. However, in the end we would get the same set of discrete equations.

    Numerical linear algebra

    Note how the system matrix, which is actually the Jacobian, depends on the previous Newton step $A = F'(u^n)$. Hence we need to tell the function that computes the system matrix about the solution at the last Newton step. In an implementation with a classical assemble_system() function we would gather this information from the last Newton step during assembly by the use of the member functions FEValuesBase::get_function_values() and FEValuesBase::get_function_gradients(). This is how step-15, for example, does things. The assemble_system() function would then look like:

    template <int dim>
    void GelfandProblem<dim>::assemble_system()
    {
    system_matrix = 0;
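
    The listing is truncated here in the diff; inside the cell loop, the gathering of the previous Newton step would look roughly like the following sketch (the names fe_values, solution, and n_q_points are assumptions for illustration):

  std::vector<double>         old_values(n_q_points);
  std::vector<Tensor<1, dim>> old_gradients(n_q_points);
  fe_values.get_function_values(solution, old_values);
  fe_values.get_function_gradients(solution, old_gradients);
  // old_values[q] and old_gradients[q] now hold u_h^n and its gradient at
  // quadrature point q, exactly the data needed for the Jacobian A_ij above.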

    Triangulation


    As said in step-37, the matrix-free method gets more efficient if we choose a higher order finite element space. Since we want to solve the problem on the $d$-dimensional unit ball, it would be good to have an appropriate boundary approximation to overcome convergence issues. For this reason we use an isoparametric approach with the MappingQ class to recover the smooth boundary as well as the mapping for inner cells. In addition, to get a good triangulation in total we make use of the TransfiniteInterpolationManifold.


    The commented program

    First we include the typical headers of the deal.II library needed for this tutorial:

      #include <deal.II/base/quadrature_lib.h>

    Matrix-free JacobianOperator

    In the beginning we define the matrix-free operator for the Jacobian. As a guideline we follow the tutorials step-37 and step-48, where the precise interface of the MatrixFreeOperators::Base class was extensively documented.

    Since we want to use the Jacobian as system matrix and pass it to the linear solver as well as to the multilevel preconditioner classes, we derive the JacobianOperator class from the MatrixFreeOperators::Base class, so that we already have the right interface. The two functions we need to override from the base class are the MatrixFreeOperators::Base::apply_add() and the MatrixFreeOperators::Base::compute_diagonal() function. To allow preconditioning with float precision we define the number type as a template argument.


    As mentioned already in the introduction, we need to evaluate the Jacobian $F'$ at the last Newton step $u_h^n$ for the computation of the Newton update $s_h^n$. To get the information of the last Newton step $u_h^n$ we do pretty much the same as in step-37, where we stored the values of a coefficient function in a table nonlinear_values once before we use the matrix-free operator. Instead of a function evaluate_coefficient(), we here implement a function evaluate_newton_step().


    As additional private member functions of the JacobianOperator we implement the local_apply() and the local_compute_diagonal() function. The first one is the actual worker function for the matrix-vector application, which we pass to the MatrixFree::cell_loop() in the apply_add() function. The latter is the worker function to compute the diagonal, which we pass to the MatrixFreeTools::compute_diagonal() function.

    For better readability of the source code we further define an alias for the FEEvaluation object.
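
    Based on this description, a hedged sketch of evaluate_newton_step() might look as follows. It assumes the FECellIntegrator alias just mentioned and a table member nonlinear_values, and follows the step-37 coefficient-caching pattern rather than quoting the tutorial verbatim:

  template <int dim, int fe_degree, typename number>
  void JacobianOperator<dim, fe_degree, number>::evaluate_newton_step(
    const LinearAlgebra::distributed::Vector<number> &newton_step)
  {
    const unsigned int n_cells = this->data->n_cell_batches();
    FECellIntegrator   phi(*this->data);
    nonlinear_values.reinit(n_cells, phi.n_q_points);
    for (unsigned int cell = 0; cell < n_cells; ++cell)
      {
        phi.reinit(cell);
        phi.read_dof_values_plain(newton_step);
        phi.evaluate(EvaluationFlags::values);
        // Cache exp(u_h^n) at every quadrature point for later use in vmult.
        for (unsigned int q = 0; q < phi.n_q_points; ++q)
          nonlinear_values(cell, q) = std::exp(phi.get_value(q));
      }
  }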

      template <int dim, int fe_degree, typename number>

    GelfandProblem::evaluate_residual


    Next we implement a function which evaluates the nonlinear discrete residual for a given input vector ( $\texttt{dst} = F(\texttt{src})$). This function is then used for the assembly of the right hand side of the linearized system and later for the computation of the residual of the next Newton step to check if we already reached the error tolerance. As this function should not affect any class variable we define it as a constant function. Internally we exploit the fast finite element evaluation through the FEEvaluation class and the MatrixFree::cell_loop(), similar to the apply_add() function of the JacobianOperator.


    First we create a pointer to the MatrixFree object, which is stored in the system_matrix. Then we pass the worker function local_evaluate_residual() for the cell wise evaluation of the residual together with the input and output vector to the MatrixFree::cell_loop(). In addition, we enable the zero out of the output vector in the loop, which is more efficient than calling dst = 0.0 separately before.

    Note that with this approach we do not have to take care about the MPI related data exchange, since all the bookkeeping is done by the MatrixFree::cell_loop().

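    The diff truncates the implementation at this point. A minimal sketch consistent with the description above (using the local_evaluate_residual worker named in the text; the exact signature is an assumption) could read:

  template <int dim, int fe_degree>
  void GelfandProblem<dim, fe_degree>::evaluate_residual(
    LinearAlgebra::distributed::Vector<double>       &dst,
    const LinearAlgebra::distributed::Vector<double> &src) const
  {
    auto matrix_free = system_matrix.get_matrix_free();
    // Last argument: zero out 'dst' inside the loop, which is more efficient
    // than a separate dst = 0.0, as explained above.
    matrix_free->cell_loop(&GelfandProblem::local_evaluate_residual,
                           this, dst, src, true);
  }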
     
     

    GelfandProblem::compute_residual


    According to step-15 the following function computes the norm of the nonlinear residual for the solution $u_h^n + \alpha s_h^n$ with the help of the evaluate_residual() function. The Newton step length $\alpha$ becomes important if we use an adaptive version of the Newton method. Then for example we would compute the residual for different step lengths and compare the residuals. However, for our problem the full Newton step with $\alpha=1$ is the best we can do. An adaptive version of Newton's method becomes interesting if we have no good initial value. Note that in theory Newton's method converges with quadratic order, but only if we have an appropriate initial value. For unsuitable initial values the Newton method diverges even with quadratic order. A common way is then to use a damped version $\alpha<1$ until the Newton step is good enough and the full Newton step can be performed. This was also discussed in step-15.


      template <int dim, int fe_degree>
      double GelfandProblem<dim, fe_degree>::compute_residual(const double alpha)
      {
      TimerOutput::Scope t(computing_timer, "solve");
     
     
    We define a maximal number of Newton steps and tolerances for the convergence criterion. Usually, with good starting values, the Newton method converges in three to six steps, so maximal ten steps should be totally sufficient. As tolerances we use $\|F(u^n_h)\|<\text{TOL}_f =
   10^{-12}$ for the norm of the residual and $\|s_h^n\| < \text{TOL}_x =
   10^{-10}$ for the norm of the Newton update. This seems a bit over the top, but we will see that, for our example, we will achieve these tolerances after a few steps.

      const unsigned int itmax = 10;
      const double TOLf = 1e-12;
      const double TOLx = 1e-10;
      compute_update();
     
     

    Then we compute the errors, namely the norm of the Newton update and the residual. Note that at this point one could incorporate a step size control for the Newton method by varying the input parameter $\alpha$ for the compute_residual function. However, here we just use $\alpha$ equal to one for a plain Newton iteration.


      const double ERRx = newton_update.l2_norm();
      const double ERRf = compute_residual(1.0);
     

    We show the solution for the two- and three-dimensional problem in the following figure.

    Solution of the two-dimensional Gelfand problem.
    Solution of the three-dimensional Gelfand problem.

    Newton solver


    In the program output above we find some interesting information about the Newton iterations. The terminal output in each refinement cycle presents detailed diagnostics of the Newton method, which show first of all the number of Newton steps and for each step the norm of the residual $\|F(u_h^{n+1})\|$, the norm of the Newton update $\|s_h^n\|$, and the number of CG iterations it.


    We observe that for all cases the Newton method converges in approximately three to four steps, which shows the quadratic convergence of the Newton method with a full step length $\alpha = 1$. However, be aware that for a badly chosen initial guess $u_h^0$, the Newton method will also diverge quadratically. Usually if you do not have an appropriate initial guess, you try a few damped Newton steps with a reduced step length $\alpha < 1$ until the Newton step is again in the quadratic convergence domain. This damping and relaxation of the Newton step length truly requires a more sophisticated implementation of the Newton method, which we leave to you as a possible extension of the tutorial.

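
    As a hedged illustration of such a damping strategy, a simple backtracking sketch under assumed names (compute_residual with the step length argument is described in this tutorial; the loop itself is not part of its code):

  // Halve the step length until the residual actually decreases.
  double alpha = 1.0;
  const double old_residual = compute_residual(0.0);
  while (alpha > 1e-2 && compute_residual(alpha) >= old_residual)
    alpha *= 0.5;
  solution.add(alpha, newton_update);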

    Furthermore, we see that the number of CG iterations is approximately constant with successive mesh refinements and an increasing number of DoFs. This is of course due to the geometric multigrid preconditioner and similar to the observations made in other tutorials that use this method, e.g., step-16 and step-37. Just to give an example, in the three-dimensional case after five refinements, we have approximately 14.7 million distributed DoFs with fourth-order Lagrangian finite elements, but the number of CG iterations is still less than ten.

    In addition, there is one more very useful optimization that we applied and that should be mentioned here. In the compute_update() function we explicitly reset the vector holding the Newton update before passing it as the output vector to the solver. In that case we use a starting value of zero for the CG method, which is more suitable than the previous Newton update, the actual content of the newton_update before resetting, and thus reduces the number of CG iterations by a few steps.

    Possibilities for extensions

    A couple of possible extensions are available concerning minor updates to the present code as well as a deeper numerical investigation of the Gelfand problem.

    More sophisticated Newton iteration

    Beside a step size controlled version of the Newton iteration as mentioned already in step-15 (and actually implemented, with many more bells and whistles, in step-77), one could also implement a more flexible stopping criterion for the Newton iteration. For example one could replace the fixed tolerances for the residual TOLf and for the Newton update TOLx and implement a mixed error control with a given absolute and relative tolerance, such that the Newton iteration exits with success if, e.g.,

    \begin{align*}
   \|F(u_h^{n+1})\| \leq \texttt{RelTol} \|u_h^{n+1}\| + \texttt{AbsTol}.
\end{align*}
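
    In code, such a check is a one-liner; a hedged sketch with assumed tolerance values, reusing the residual norm ERRf computed above:

  const double RelTol = 1e-8, AbsTol = 1e-12;
  const bool converged = (ERRf <= RelTol * solution.l2_norm() + AbsTol);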

    For more advanced applications with many nonlinear systems to solve, for example at each time step for a time-dependent problem, it turns out that it is not necessary to set up and assemble the Jacobian anew at every single Newton step or even for each time step. Instead, the existing Jacobian from a previous step can be used for the Newton iteration. The Jacobian is then only rebuilt if, for example, the Newton iteration converges too slowly. Such an idea yields a quasi-Newton method. Admittedly, when using the matrix-free framework, the assembly of the Jacobian is omitted anyway, but in this way one can try to optimize the reassembly of the geometric multigrid preconditioner. Remember that each time the solution from the old Newton step must be distributed to all levels and the multigrid preconditioner must be reinitialized.

    Parallel scalability and thread parallelism


    Comparison to matrix-based methods

    Analogously to step-50 and the mentioned possible extension of step-75, you can convince yourself which method is faster.

    Eigenvalue problem

    One can consider the corresponding eigenvalue problem, which is called the Bratu problem. For example, if we define a fixed eigenvalue $\lambda\in[0,6]$, we can compute the corresponding discrete eigenfunction. You will notice that the number of Newton steps will increase with increasing $\lambda$. To reduce the number of Newton steps you can use the following trick: start from a certain $\lambda$, compute the eigenfunction, increase $\lambda=\lambda+\delta_\lambda$, and then use the previous solution as an initial guess for the Newton iteration – this approach is called a "continuation method". In the end you can plot the $H^1(\Omega)$-norm over the eigenvalue $\lambda \mapsto \|u_h\|_{H^1(\Omega)}$. What do you observe for further increasing $\lambda>7$?
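
    A hedged sketch of this continuation loop (solve_newton and compute_H1_norm are hypothetical helper names, not functions of the tutorial):

  for (double lambda = 0.0; lambda <= 6.0; lambda += 0.5)
    {
      // Re-use the eigenfunction from the previous lambda as initial guess.
      solve_newton(lambda, solution);
      std::cout << lambda << ' ' << compute_H1_norm(solution) << std::endl;
    }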

    The plain program

    /* ---------------------------------------------------------------------
    *
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    This tutorial program solves the Euler equations of fluid dynamics using an explicit time integrator with the matrix-free framework applied to a high-order discontinuous Galerkin discretization in space. For details about the Euler system and an alternative implicit approach, we also refer to the step-33 tutorial program. You might also want to look at step-69 for an alternative approach to solving these equations.

    The Euler equations

    The Euler equations are a conservation law, describing the motion of a compressible inviscid gas,

    \[
 \frac{\partial \mathbf{w}}{\partial t} + \nabla \cdot \mathbf{F}(\mathbf{w}) =
 \mathbf{G}(\mathbf w),
\]

    where the $d+2$ components of the solution vector are $\mathbf{w}=(\rho, \rho
 u_1,\ldots,\rho u_d,E)^{\mathrm T}$. Here, $\rho$ denotes the fluid density, ${\mathbf u}=(u_1,\ldots, u_d)^\mathrm T$ the fluid velocity, and $E$ the energy density of the gas. The velocity is not directly solved for, but rather the variable $\rho \mathbf{u}$, the linear momentum (since this is the conserved quantity).

    The Euler flux function, a $(d+2)\times d$ matrix, is defined as

    \[
   \mathbf F(\mathbf w)
   =
   \begin{pmatrix}
   \rho \mathbf{u}\\
   \rho \mathbf{u} \otimes \mathbf{u} + \mathbb{I}p\\
   (E+p)\mathbf{u}
   \end{pmatrix}
\]

with $\mathbb{I}$ the $d\times d$ identity matrix and $\otimes$ the outer product; its components denote the mass, momentum, and energy fluxes, respectively. The right hand side forcing is given by

\[
  \mathbf G(\mathbf w)
  =
  \begin{pmatrix}
  0\\
  \rho\mathbf{g}\\
  \rho \mathbf{u} \cdot \mathbf{g}
  \end{pmatrix},
\]

where the vector $\mathbf g$ denotes the direction and magnitude of gravity. It could, however, also denote any other external force per unit mass that is acting on the fluid. (Think, for example, of the electrostatic forces exerted by an external electric field on charged particles.)

The three blocks of equations, the second involving $d$ components, describe the conservation of mass, momentum, and energy. The pressure is not a solution variable but needs to be expressed through a "closure relationship" in terms of the other variables; we here choose the relationship appropriate for a gas with molecules composed of two atoms, which at moderate temperatures is given by $p=(\gamma - 1) \left(E-\frac 12 \rho
\mathbf{u}\cdot \mathbf{u}\right)$ with the constant $\gamma = 1.4$.
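To make the closure relationship and the flux definition concrete, here is a scalar C++ sketch using deal.II's Tensor arithmetic. It is a simplification of the tutorial's actual implementation, which is templated on a vectorized number type:

#include <deal.II/base/tensor.h>

// Pressure closure p = (gamma - 1)(E - 1/2 rho |u|^2), written in terms
// of the conserved variables (rho, rho u, E); gamma = 1.4 as in the text.
template <int dim>
double pressure(const double                  rho,
                const dealii::Tensor<1, dim> &momentum,
                const double                  E)
{
  const double gamma = 1.4;
  return (gamma - 1.) * (E - 0.5 * momentum.norm_square() / rho);
}

// Euler flux F(w): a (d+2) x d matrix stored row-wise as mass,
// momentum, and energy fluxes.
template <int dim>
dealii::Tensor<1, dim + 2, dealii::Tensor<1, dim>>
euler_flux(const double                  rho,
           const dealii::Tensor<1, dim> &momentum,
           const double                  E)
{
  const double                 p        = pressure<dim>(rho, momentum, E);
  const dealii::Tensor<1, dim> velocity = momentum / rho;

  dealii::Tensor<1, dim + 2, dealii::Tensor<1, dim>> flux;
  flux[0] = momentum;                      // mass flux: rho u
  for (unsigned int d = 0; d < dim; ++d)   // momentum flux: rho u (x) u + I p
    {
      flux[1 + d] = momentum[d] * velocity;
      flux[1 + d][d] += p;
    }
  flux[dim + 1] = (E + p) * velocity;      // energy flux: (E + p) u
  return flux;
}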

    High-order discontinuous Galerkin discretization

    For spatial discretization, we use a high-order discontinuous Galerkin (DG) discretization, using a solution expansion of the form

\[
 \mathbf{w}_h(\mathbf{x}, t) =
 \sum_{j=1}^{n_\text{dofs}} \boldsymbol{\varphi}_j(\mathbf{x}) {w}_j(t).
\]

Here, $\boldsymbol{\varphi}_j$ denotes the $j$th basis function, written in vector form with separate shape functions for the different components and letting $w_j(t)$ go through the density, momentum, and energy variables, respectively. In this form, the space dependence is contained in the shape functions and the time dependence in the unknown coefficients $w_j$. As opposed to the continuous finite element method where some shape functions span across element boundaries, the shape functions are local to a single element in DG methods, with a discontinuity from one element to the next. The connection of the solution from one cell to its neighbors is instead imposed by the numerical fluxes specified below. This allows for some additional flexibility, for example to introduce directionality in the numerical method by, e.g., upwinding.

DG methods are popular methods for solving problems of transport character because they combine low dispersion errors with controllable dissipation on barely resolved scales. This makes them particularly attractive for simulation in the field of fluid dynamics where a wide range of active scales needs to be represented and inadequately resolved features are prone to disturb the important well-resolved features. Furthermore, high-order DG methods are well-suited for modern hardware with the right implementation. At the same time, DG methods are no silver bullet. In particular when the solution develops discontinuities (shocks), as is typical for the Euler equations in some flow regimes, high-order DG methods tend to produce oscillatory solutions, like all high-order methods when not using flux- or slope-limiters. This is a consequence of Godunov's theorem, which states that any total variation diminishing (TVD) scheme that is linear (like a basic DG discretization) can at most be first-order accurate. Put differently, since DG methods aim for higher order accuracy, they cannot be TVD on solutions that develop shocks. Even though some communities claim that the numerical flux in DG methods can control dissipation, this is of limited value unless all shocks in a problem align with cell boundaries. Any shock that passes through the interior of cells will again produce oscillatory components due to the high-order polynomials. In the finite element and DG communities, there exist a number of different approaches to deal with shocks, for example the introduction of artificial diffusion on troubled cells (using a troubled-cell indicator based e.g. on a modal decomposition of the solution), a switch to dissipative low-order finite volume methods on a subgrid, or the addition of some limiting procedures. Given the ample possibilities in this context, combined with the considerable implementation effort, we here refrain from the regime of the Euler equations with pronounced shocks, and rather concentrate on the regime of subsonic flows with wave-like phenomena. For a method that works well with shocks (but is more expensive per unknown), we refer to the step-69 tutorial program.

    For the derivation of the DG formulation, we multiply the Euler equations with test functions $\mathbf{v}$ and integrate over an individual cell $K$, which gives

\[
 \left(\mathbf{v}, \frac{\partial \mathbf{w}}{\partial t}\right)_{K}
 + \left(\mathbf{v}, \nabla \cdot \mathbf{F}(\mathbf{w})\right)_{K} =
 \left(\mathbf{v},\mathbf{G}(\mathbf w)\right)_{K}.
\]

    We then integrate the second term by parts, moving the divergence from the solution slot to the test function slot, and producing an integral over the element boundary:

\[
 \left(\mathbf{v}, \frac{\partial \mathbf{w}}{\partial t}\right)_{K}
 - \left(\nabla \mathbf{v}, \mathbf{F}(\mathbf{w})\right)_{K}
 + \left<\mathbf{v}, \mathbf{n} \cdot \widehat{\mathbf{F}}(\mathbf{w})
 \right>_{\partial K} =
 \left(\mathbf{v},\mathbf{G}(\mathbf w)\right)_{K}.
\]

In the surface integral, we have replaced the term $\mathbf{F}(\mathbf w)$ by the term $\widehat{\mathbf{F}}(\mathbf w)$, the numerical flux. The role of the numerical flux is to connect the solution on neighboring elements and weakly impose continuity of the solution. This ensures that the global coupling of the PDE is reflected in the discretization, despite independent basis functions on the cells. The connectivity to the neighbor is included by defining the numerical flux as a function $\widehat{\mathbf{F}}(\mathbf w^-, \mathbf w^+)$ of the solution from both sides of an interior face, $\mathbf w^-$ and $\mathbf w^+$. A basic property we require is that the numerical flux needs to be conservative. That is, we want all information (i.e., mass, momentum, and energy) that leaves a cell over a face to enter the neighboring cell in its entirety and vice versa. This can be expressed as $\widehat{\mathbf{F}}(\mathbf w^-, \mathbf w^+) = \widehat{\mathbf{F}}(\mathbf w^+, \mathbf w^-)$, meaning that the numerical flux evaluates to the same result from either side. Combined with the fact that the numerical flux is multiplied by the unit outer normal vector on the face under consideration, which points in opposite directions from the two sides, we see that conservation is fulfilled. An alternative point of view of the numerical flux is as a single-valued intermediate state that links the solution weakly from both sides.

There is a large number of numerical flux functions available, also called Riemann solvers. For the Euler equations, there exist so-called exact Riemann solvers – meaning that the states from both sides are combined in a way that is consistent with the Euler equations along a discontinuity – and approximate Riemann solvers, which violate some physical properties and rely on other mechanisms to render the scheme accurate overall. Approximate Riemann solvers have the advantage of being cheaper to compute. Most flux functions have their origin in the finite volume community; finite volume methods are similar to DG methods with polynomial degree 0 within the cells (called volumes). As the volume integral of the Euler operator $\mathbf{F}$ would disappear for constant solution and test functions, the numerical flux must fully represent the physical operator, explaining why there has been a large body of research in that community. For DG methods, consistency is guaranteed by higher order polynomials within the cells, making the numerical flux less of an issue and usually affecting only the convergence rate, e.g., whether the solution converges as $\mathcal O(h^p)$, $\mathcal O(h^{p+1/2})$ or $\mathcal O(h^{p+1})$ in the $L_2$ norm for polynomials of degree $p$. The numerical flux can thus be seen as a mechanism to select more advantageous dissipation/dispersion properties or to control the extremal eigenvalues of the discretized and linearized operator, which affect the maximal admissible time step size in explicit time integrators.

    In this tutorial program, we implement two variants of fluxes that can be controlled via a switch in the program (of course, it would be easy to make them a run time parameter controlled via an input file). The first flux is the local Lax–Friedrichs flux

\[
 \hat{\mathbf{F}}(\mathbf{w}^-,\mathbf{w}^+) =
 \frac{\mathbf{F}(\mathbf{w}^-)+\mathbf{F}(\mathbf{w}^+)}{2} +
    \frac{\lambda}{2}\left[\mathbf{w}^--\mathbf{w}^+\right]\otimes
    \mathbf{n^-}.
\]

In the original definition of the Lax–Friedrichs flux, a factor $\lambda =
\max\left(\|\mathbf{u}^-\|+c^-, \|\mathbf{u}^+\|+c^+\right)$ is used (corresponding to the maximal speed at which information is moving on the two sides of the interface), stating that the difference between the two states, $[\![\mathbf{w}]\!]$, is penalized by the largest eigenvalue in the Euler flux, which is $\|\mathbf{u}\|+c$, where $c=\sqrt{\gamma p / \rho}$ is the speed of sound. In the implementation below, we modify the penalty term somewhat, given that the penalty is of approximate nature anyway. We use

\begin{align*}
 \lambda
 &=
 \frac{1}{2}\max\left(\sqrt{\|\mathbf{u^-}\|^2+(c^-)^2},
                      \sqrt{\|\mathbf{u}^+\|^2+(c^+)^2}\right)\\
 &=
 \frac{1}{2}\sqrt{\max\left(\|\mathbf{u^-}\|^2+(c^-)^2,
                            \|\mathbf{u}^+\|^2+(c^+)^2\right)}.
\end{align*}

The additional factor $\frac 12$ reduces the penalty strength (which results in a reduced negative real part of the eigenvalues, and thus increases the admissible time step size). Using the squares within the sums allows us to reduce the number of expensive square root operations, which is 4 for the original Lax–Friedrichs definition, to a single one. This simplification leads to at most a factor of 2 in the reduction of the parameter $\lambda$, since $\|\mathbf{u}\|^2+c^2 \leq
\|\mathbf{u}\|^2+2 c \|\mathbf{u}\| + c^2 = \left(\|\mathbf{u}\|+c\right)^2
\leq 2 \left(\|\mathbf{u}\|^2+c^2\right)$, with the last inequality following from Young's inequality.

The second numerical flux is one proposed by Harten, Lax and van Leer, called the HLL flux. It takes the different directions of propagation of the Euler equations into account, depending on the speed of sound. It utilizes some intermediate states $\bar{\mathbf{u}}$ and $\bar{c}$ to define the two branches $s^\mathrm{p} = \max\left(0, \bar{\mathbf{u}}\cdot \mathbf{n} + \bar{c}\right)$ and $s^\mathrm{n} = \min\left(0, \bar{\mathbf{u}}\cdot \mathbf{n} - \bar{c}\right)$. From these branches, one then defines the flux

\[
 \hat{\mathbf{F}}(\mathbf{w}^-,\mathbf{w}^+) =
 \frac{s^\mathrm{p} \mathbf{F}(\mathbf{w}^-)-s^\mathrm{n} \mathbf{F}(\mathbf{w}^+)}
                    {s^\mathrm p - s^\mathrm{n} } +
 \frac{s^\mathrm{p} s^\mathrm{n}}{s^\mathrm{p}-s^\mathrm{n}}
 \left[\mathbf{w}^--\mathbf{w}^+\right]\otimes \mathbf{n^-}.
\]
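A scalar sketch of the local Lax–Friedrichs flux with the modified penalty $\lambda$ from above, reusing the pressure() and euler_flux() helpers from the earlier sketch (again a simplification, not the tutorial's vectorized code):

#include <algorithm>
#include <cmath>
#include <deal.II/base/tensor.h>

// Numerical flux \hat F(w^-, w^+) . n^- for the local Lax-Friedrichs
// variant with the modified penalty parameter lambda described above.
template <int dim>
dealii::Tensor<1, dim + 2>
lax_friedrichs_flux(const dealii::Tensor<1, dim + 2> &w_m,
                    const dealii::Tensor<1, dim + 2> &w_p,
                    const dealii::Tensor<1, dim>     &normal)
{
  using namespace dealii;

  // Unpack a state vector w = (rho, rho u, E).
  const auto unpack = [](const Tensor<1, dim + 2> &w,
                         double                   &rho,
                         Tensor<1, dim>           &mom,
                         double                   &E) {
    rho = w[0];
    for (unsigned int d = 0; d < dim; ++d)
      mom[d] = w[1 + d];
    E = w[dim + 1];
  };

  double         rho_m, E_m, rho_p, E_p;
  Tensor<1, dim> mom_m, mom_p;
  unpack(w_m, rho_m, mom_m, E_m);
  unpack(w_p, rho_p, mom_p, E_p);

  // lambda = 1/2 sqrt(max(|u|^2 + c^2)) over the two states, with
  // c^2 = gamma p / rho, as derived in the text.
  const double gamma = 1.4;
  const double s2_m  = mom_m.norm_square() / (rho_m * rho_m) +
                      gamma * pressure<dim>(rho_m, mom_m, E_m) / rho_m;
  const double s2_p = mom_p.norm_square() / (rho_p * rho_p) +
                      gamma * pressure<dim>(rho_p, mom_p, E_p) / rho_p;
  const double lambda = 0.5 * std::sqrt(std::max(s2_m, s2_p));

  // Average of the physical fluxes contracted with n^-, plus the
  // jump penalization lambda/2 [w^- - w^+].
  const auto F_m = euler_flux<dim>(rho_m, mom_m, E_m);
  const auto F_p = euler_flux<dim>(rho_p, mom_p, E_p);

  Tensor<1, dim + 2> result;
  for (unsigned int c = 0; c < dim + 2; ++c)
    result[c] = 0.5 * ((F_m[c] + F_p[c]) * normal) +
                0.5 * lambda * (w_m[c] - w_p[c]);
  return result;
}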

Regarding the definition of the intermediate state $\bar{\mathbf{u}}$ and $\bar{c}$, several variants have been proposed. The variant originally proposed uses a density-averaged definition of the velocity, $\bar{\mathbf{u}} = \frac{\sqrt{\rho^-} \mathbf{u}^- + \sqrt{\rho^+}\mathbf{u}^+}{\sqrt{\rho^-} + \sqrt{\rho^+}}$. Since we consider the Euler equations without shocks, we simply use arithmetic means, $\bar{\mathbf{u}} = \frac{\mathbf{u}^- + \mathbf{u}^+}{2}$ and $\bar{c} = \frac{c^- + c^+}{2}$, with $c^{\pm} = \sqrt{\gamma p^{\pm} / \rho^{\pm}}$, in this tutorial program, and leave other variants to a possible extension. We also note that the HLL flux has been extended in the literature to the so-called HLLC flux, where C stands for the ability to represent contact discontinuities.

At the boundaries with no neighboring state $\mathbf{w}^+$ available, it is common practice to deduce suitable exterior values from the boundary conditions (see the general literature on DG methods for details). In this tutorial program, we consider three types of boundary conditions, namely inflow boundary conditions where all components are prescribed,

\[
 \mathbf{w}^+ = \begin{pmatrix} \rho_\mathrm{D}(t)\\
 (\rho \mathbf u)_{\mathrm D}(t) \\ E_\mathrm{D}(t)\end{pmatrix} \quad
  \text{(Dirichlet)},
\]

    subsonic outflow boundaries, where we do not prescribe exterior solutions as the flow field is leaving the domain and use the interior values instead; we still need to prescribe the energy as there is one incoming characteristic left in the Euler flux,

\[
 \mathbf{w}^+ = \begin{pmatrix} \rho^-\\
 (\rho \mathbf u)^- \\ E_\mathrm{D}(t)\end{pmatrix} \quad
  \text{(mixed Neumann/Dirichlet)},
\]

and wall boundary conditions, which describe a no-penetration configuration:

\[
 \mathbf{w}^+ = \begin{pmatrix} \rho^-\\
 (\rho \mathbf u)^- - 2 [(\rho \mathbf u)^-\cdot \mathbf n] \mathbf{n}
  \\ E^-\end{pmatrix}.
\]
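For illustration, the wall (mirror) state can be built as in the following sketch, assuming the same state layout $(\rho, \rho\mathbf u, E)$ as in the earlier sketches:

#include <deal.II/base/tensor.h>

// Exterior state for the no-penetration wall condition: density and
// energy are copied, the normal component of the momentum is mirrored.
template <int dim>
dealii::Tensor<1, dim + 2>
wall_boundary_state(const dealii::Tensor<1, dim + 2> &w_minus,
                    const dealii::Tensor<1, dim>     &normal)
{
  dealii::Tensor<1, dim> mom;
  for (unsigned int d = 0; d < dim; ++d)
    mom[d] = w_minus[1 + d];

  // (rho u)^+ = (rho u)^- - 2 [(rho u)^- . n] n
  const double normal_momentum = mom * normal;

  dealii::Tensor<1, dim + 2> w_plus = w_minus;
  for (unsigned int d = 0; d < dim; ++d)
    w_plus[1 + d] = mom[d] - 2. * normal_momentum * normal[d];
  return w_plus;
}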

The polynomial expansion of the solution is finally inserted into the weak form and test functions are replaced by the basis functions. This gives a discrete-in-space, continuous-in-time nonlinear system with a finite number of unknown coefficient values $w_j$, $j=1,\ldots,n_\text{dofs}$. Regarding the choice of the polynomial degree in the DG method, there is no consensus in the literature as of 2019 as to what polynomial degrees are most efficient, and the decision is problem-dependent. Higher order polynomials ensure better convergence rates and are thus superior for moderate to high accuracy requirements for smooth solutions. At the same time, the volume-to-surface ratio of where degrees of freedom are located increases with higher degrees, and this makes the effect of the numerical flux weaker, typically reducing dissipation. However, in most of the cases the solution is not smooth, at least not compared to the resolution that can be afforded. This is true for example in incompressible fluid dynamics, compressible fluid dynamics, and the related topic of wave propagation. In this pre-asymptotic regime, the error is approximately proportional to the numerical resolution, and other factors such as dispersion errors or the dissipative behavior become more important. Very high order methods are often ruled out because they come with more restrictive CFL conditions measured against the number of unknowns, and they are also not as flexible when it comes to representing complex geometries. Therefore, polynomial degrees between two and six are most popular in practice, see e.g. the efficiency evaluation in [FehnWallKronbichler2019] and references cited therein.

    Explicit time integration

    To discretize in time, we slightly rearrange the weak form and sum over all cells:

\[
 \sum_{K \in \mathcal T_h} \left(\boldsymbol{\varphi}_i,
 \frac{\partial \mathbf{w}}{\partial t}\right)_{K}
 =
 \sum_{K \in \mathcal T_h}
 \left[
 \left(\nabla \boldsymbol{\varphi}_i, \mathbf{F}(\mathbf{w})\right)_{K}
 - \left<\boldsymbol{\varphi}_i,
 \mathbf{n} \cdot \widehat{\mathbf{F}}(\mathbf{w})\right>_{\partial K} +
 \left(\boldsymbol{\varphi}_i,\mathbf{G}(\mathbf w)\right)_{K}
 \right],
\]

where $\boldsymbol{\varphi}_i$ runs through all basis functions with indices from 1 to $n_\text{dofs}$.

We now denote by $\mathcal M$ the mass matrix with entries $\mathcal M_{ij} =
\sum_{K} \left(\boldsymbol{\varphi}_i,
/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html	2024-01-30 03:04:54.960902311 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html	2024-01-30 03:04:54.960902311 +0000
@@ -141,17 +141,17 @@

    Introduction

    Simulation of the motion of massless tracer particles in a vortical flow

Particles play an important part in numerical models for a large number of applications. Particles are routinely used as massless tracers to visualize the dynamics of a transient flow. They can also play an intrinsic role as part of a more complex finite element model, as is the case for the Particle-In-Cell (PIC) method [GLHPW2018], or they can even be used to simulate the motion of granular matter, as in the Discrete Element Method (DEM) [Blais2019]. In the case of DEM, the resulting model is not related to the finite element method anymore, but just leads to a system of ordinary differential equations which describes the motion of the particles and the dynamics of their collisions. All of these models can be built using deal.II's particle handling capabilities.

In the present step, we use particles as massless tracers to illustrate the dynamics of a vortical flow. Since the particles are massless tracers, the position of each particle $i$ is described by the following ordinary differential equation (ODE):

\[
 \frac{d \textbf{x}_i}{dt} =\textbf{u}(\textbf{x}_i)
\]

where $\textbf{x}_i$ is the position of particle $i$ and $\textbf{u}(\textbf{x}_i)$ the flow velocity at its position. In the present step, this ODE is solved using the explicit Euler method. The resulting scheme is:

\[
 \textbf{x}_{i}^{n+1} = \textbf{x}_{i}^{n} + \Delta t \; \textbf{u}(\textbf{x}_{i}^{n})
\]

where $\textbf{x}_{i}^{n+1}$ and $\textbf{x}_{i}^{n}$ are the positions of particle $i$ at time $t+\Delta t$ and $t$, respectively, and where $\Delta t$ is the time step. In the present step, the velocity at the location of particles is obtained in two different fashions:
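In code, one such explicit Euler step could look like the following sketch. The tutorial itself stores particles in a Particles::ParticleHandler; this loop only illustrates the arithmetic of the update, with a caller-supplied velocity functor as an assumption of the sketch:

#include <deal.II/base/point.h>
#include <deal.II/base/tensor.h>
#include <vector>

// One explicit Euler step x_i^{n+1} = x_i^n + dt u(x_i^n) for all
// particles. VelocityField is any callable returning the velocity
// u(x) as a Tensor<1, dim> at a given point.
template <int dim, typename VelocityField>
void advance_particles(std::vector<dealii::Point<dim>> &positions,
                       const VelocityField             &velocity,
                       const double                     dt)
{
  for (dealii::Point<dim> &x : positions)
    x += dt * velocity(x);
}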

  • The plain program

This program was contributed by Matthias Maier (Texas A&M University), and Ignacio Tomas (Sandia National Laboratories $^{\!\dagger}$).

$^\dagger$Sandia National Laboratories is a multimission laboratory managed and operated by National Technology & Engineering Solutions of Sandia, LLC, a wholly owned subsidiary of Honeywell International Inc., for the U.S. Department of Energy's National Nuclear Security Administration under contract DE-NA0003525. This document describes objective technical results and analysis. Any subjective views or opinions that might be expressed in the paper do not necessarily represent the views of the U.S. Department of Energy or the United States Government.

    Note
    This tutorial step implements a first-order accurate guaranteed maximum wavespeed method based on a first-order graph viscosity for solving Euler's equations of gas dynamics [GuermondPopov2016]. As such it is presented primarily for educational purposes. For actual research computations you might want to consider exploring a corresponding high-performance implementation of a second-order accurate scheme that uses convex limiting techniques, and strong stability-preserving (SSP) time integration, see [GuermondEtAl2018] (website).
    If you use this program as a basis for your own work, please consider citing it in your list of references. The initial version of this work was contributed to the deal.II project by the authors listed in the following citation: 10.5281/zenodo.3698223
    @@ -152,15 +152,15 @@

    It should be noted that first-order schemes in the context of hyperbolic conservation laws require prohibitively many degrees of freedom to resolve certain key features of the simulated fluid, and thus, typically only serve as elementary building blocks in higher-order schemes [GuermondEtAl2018]. However, we hope that the reader still finds the tutorial step to be a good starting point (in particular with respect to the programming techniques) before jumping into full research codes such as the second-order scheme discussed in [GuermondEtAl2018].

    Euler's equations of gas dynamics

    The compressible Euler's equations of gas dynamics are written in conservative form as follows:

\begin{align}
 \mathbf{u}_t + \text{div} \, \mathbb{f}(\mathbf{u}) = \boldsymbol{0} ,
\end{align}

where $\mathbf{u}(\textbf{x},t):\mathbb{R}^{d} \times \mathbb{R}
\rightarrow \mathbb{R}^{d+2}$, and $\mathbb{f}(\mathbf{u}):\mathbb{R}^{d+2}
\rightarrow \mathbb{R}^{(d+2) \times d}$, and $d \geq 1$ is the space dimension. We say that $\mathbf{u} \in \mathbb{R}^{d+2}$ is the state and $\mathbb{f}(\mathbf{u}) \in  \mathbb{R}^{(d+2) \times d}$ is the flux of the system. In the case of Euler's equations the state is given by $\textbf{u} = [\rho, \textbf{m}^\top,E]^{\top}$, where $\rho \in \mathbb{R}^+$ denotes the density, $\textbf{m} \in \mathbb{R}^d$ is the momentum, and $E
\in \mathbb{R}^+$ is the total energy of the system. The flux of the system $\mathbb{f}(\mathbf{u})$ is defined as

\begin{align*}
 \mathbb{f}(\textbf{u})
 =
 \begin{bmatrix}
   \textbf{m}^\top\\
   \rho^{-1} \textbf{m} \otimes \textbf{m} + \mathbb{I} p\\
   \tfrac{\textbf{m}^\top}{\rho} (E + p)
 \end{bmatrix},
\end{align*}

where $\mathbb{I} \in \mathbb{R}^{d \times d}$ is the identity matrix and $\otimes$ denotes the tensor product. Here, we have introduced the pressure $p$ that, in general, is defined by a closed-form equation of state. In this tutorial we limit the discussion to the class of polytropic ideal gases for which the pressure is given by

\begin{align*}
 p = p(\textbf{u}) := (\gamma -1) \Big(E -
 \tfrac{|\textbf{m}|^2}{2\,\rho}
 \Big),
\end{align*}

where the factor $\gamma \in (1,5/3]$ denotes the ratio of specific heats.

    Solution theory

    Hyperbolic conservation laws, such as

\begin{align*}
 \mathbf{u}_t + \text{div} \, \mathbb{f}(\mathbf{u}) = \boldsymbol{0},
\end{align*}

pose a significant challenge with respect to solution theory. An evident observation is that rewriting the equation in variational form and testing with the solution itself does not lead to an energy estimate because the pairing $\langle \text{div} \, \mathbb{f}(\mathbf{u}), \mathbf{u}\rangle$ (understood as the $L^2(\Omega)$ inner product or duality pairing) is not guaranteed to be non-negative. Notions such as energy-stability or $L^2(\Omega)$-stability are (in general) meaningless in this context.

Historically, the most fruitful step taken in order to deepen the understanding of hyperbolic conservation laws was to assume that the solution is formally defined as $\mathbf{u} := \lim_{\epsilon \rightarrow 0^+} \mathbf{u}^{\epsilon}$ where $\mathbf{u}^{\epsilon}$ is the solution of the parabolic regularization

\begin{align}
 \mathbf{u}_t^{\epsilon} + \text{div} \, \mathbb{f}(\mathbf{u}^{\epsilon})
 - {\epsilon} \Delta \mathbf{u}^{\epsilon} = 0.
\end{align}

Such solutions, which are understood as the solution recovered in the zero-viscosity limit, are often referred to as viscosity solutions. (This is because, physically, $\epsilon$ can be understood as related to the viscosity of the fluid, i.e., a quantity that indicates the amount of friction neighboring gas particles moving at different speeds exert on each other. The Euler equations themselves are derived under the assumption of no friction, but can physically be expected to describe the limiting case of vanishing friction or viscosity.) Global existence and uniqueness of such solutions is an open issue. However, we know at least that if such viscosity solutions exist they have to satisfy the constraint $\textbf{u}(\mathbf{x},t) \in \mathcal{B}$ for all $\mathbf{x} \in \Omega$ and $t \geq 0$ where

\begin{align}
  \mathcal{B} = \big\{ \textbf{u} =
  [\rho, \textbf{m}^\top,E]^{\top} \in \mathbb{R}^{d+2} \, \big |
  \
  \rho > 0 \, ,
  \
  E - \tfrac{|\textbf{m}|^2}{2\,\rho} > 0 \, ,
  \
  s(\mathbf{u}) \geq \min_{x \in \Omega} s(\mathbf{u}_0(\mathbf{x}))
  \big\}.
\end{align}

Here, $s(\mathbf{u})$ denotes the specific entropy

\begin{align}
  s(\mathbf{u}) = \ln \Big(\frac{p(\mathbf{u})}{\rho^{\gamma}}\Big).
\end{align}

We will refer to $\mathcal{B}$ as the invariant set of Euler's equations. In other words, a state $\mathbf{u}(\mathbf{x},t)\in\mathcal{B}$ obeys positivity of the density, positivity of the internal energy, and a local minimum principle on the specific entropy. This condition is a simplified version of a class of pointwise stability constraints satisfied by the exact (viscosity) solution. By pointwise we mean that the constraint has to be satisfied at every point of the domain, not just in an averaged (integral, or high order moments) sense.

In the context of a numerical approximation, a violation of such a constraint has dire consequences: it almost surely leads to catastrophic failure of the numerical scheme, loss of hyperbolicity, and overall, loss of well-posedness of the (discrete) problem. It would also mean that we have computed something that cannot be interpreted physically. (For example, what are we to make of a computed solution with a negative density?) In the following we will formulate a scheme that ensures that the discrete approximation of $\mathbf{u}(\mathbf{x},t)$ remains in $\mathcal{B}$.
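As an illustration of these pointwise constraints, here is a small sketch of an admissibility check for a nodal state, assuming the entropy minimum $s_{\min}$ of the initial data has been computed once up front (this helper is an assumption of the sketch, not part of the tutorial code):

#include <cmath>
#include <deal.II/base/tensor.h>

// Pointwise admissibility check: is U = [rho, m, E] in the invariant
// set B? s_min is assumed to be min_x s(u_0(x)) of the initial data.
template <int dim>
bool state_is_admissible(const dealii::Tensor<1, dim + 2> &U,
                         const double                      gamma,
                         const double                      s_min)
{
  const double rho = U[0];
  if (!(rho > 0.))
    return false; // positivity of the density

  dealii::Tensor<1, dim> m;
  for (unsigned int d = 0; d < dim; ++d)
    m[d] = U[1 + d];
  const double E = U[dim + 1];

  const double internal_energy = E - 0.5 * m.norm_square() / rho;
  if (!(internal_energy > 0.))
    return false; // positivity of the internal energy

  // Local minimum principle on the specific entropy s = ln(p / rho^gamma).
  const double p = (gamma - 1.) * internal_energy;
  const double s = std::log(p / std::pow(rho, gamma));
  return s >= s_min;
}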

    Variational versus collocation-type discretizations

    Following step-9, step-12, step-33, and step-67, at this point it might look tempting to base a discretization of Euler's equations on a (semi-discrete) variational formulation:

\begin{align*}
  (\partial_t\mathbf{u}_{h},\textbf{v}_h)_{L^2(\Omega)}
  - ( \mathbb{f}(\mathbf{u}_{h}) ,\text{grad} \, \textbf{v}_{h})_{L^2(\Omega)}
  + s_h(\mathbf{u}_{h},\textbf{v}_h)_{L^2(\Omega)} = \boldsymbol{0}
  \quad\forall \textbf{v}_h \in \mathbb{V}_h.
\end{align*}

Here, $\mathbb{V}_h$ is an appropriate finite element space, and $s_h(\cdot,\cdot)_{L^2(\Omega)}$ is some linear stabilization method (possibly complemented with some ad-hoc shock-capturing technique, see for instance Chapter 5 of [GuermondErn2004] and references therein). Most time-dependent discretization approaches described in the deal.II tutorials are based on such a (semi-discrete) variational approach. Fundamentally, from an analysis perspective, variational discretizations are conceived to provide some notion of global (integral) stability, meaning an estimate of the form

\begin{align*}
  |\!|\!| \mathbf{u}_{h}(t) |\!|\!| \leq |\!|\!| \mathbf{u}_{h}(0) |\!|\!|
\end{align*}

holds true, where $|\!|\!| \cdot |\!|\!| $ could represent the $L^2(\Omega)$-norm or, more generally, some discrete (possibly mesh dependent) energy-norm. Variational discretizations of hyperbolic conservation laws have been very popular since the mid eighties, in particular combined with SUPG-type stabilization and/or upwinding techniques (see the early work of [Brooks1982] and [Johnson1986]). They have proven to be some of the best approaches for simulations in the subsonic shockless regime and similarly benign situations.

However, in the transonic and supersonic regimes, and in shock-hydrodynamics applications, the use of variational schemes might be questionable. In fact, at the time of this writing, most shock-hydrodynamics codes are still firmly grounded on finite volume methods. The main reason for the failure of variational schemes in such extreme regimes is the lack of pointwise stability. This stems from the fact that a priori bounds on integrated quantities (e.g. integrals of moments) have in general no implications on pointwise properties of the solution. While some of these problems might be alleviated by the (perpetual) chase of the right shock capturing scheme, finite difference-like and finite volume schemes still have an edge in many regards.

    In this tutorial step we therefore depart from variational schemes. We will present a completely algebraic formulation (with the flavor of a collocation-type scheme) that preserves constraints pointwise, i.e.,

\begin{align*}
  \textbf{u}_h(\mathbf{x}_i,t) \in \mathcal{B}
  \;\text{at every node}\;\mathbf{x}_i\;\text{of the mesh}.
\end{align*}

    Contrary to finite difference/volume schemes, the scheme implemented in this step maximizes the use of finite element software infrastructure, works on any mesh, in any space dimension, and is theoretically guaranteed to always work, all the time, no exception. This illustrates that deal.II can be used far beyond the context of variational schemes in Hilbert spaces and that a large number of classes, modules and namespaces from deal.II can be adapted for such a purpose.

    Description of the scheme

Let $\mathbb{V}_h$ be a scalar-valued finite dimensional space spanned by a basis $\{\phi_i\}_{i \in \mathcal{V}}$, where $\phi_i:\Omega \rightarrow \mathbb{R}$ and $\mathcal{V}$ is the set of all indices (nonnegative integers) identifying each scalar Degree of Freedom (DOF) in the mesh. Therefore a scalar finite element function $u_h \in \mathbb{V}_h$ can be written as $u_h = \sum_{i \in \mathcal{V}} U_i \phi_i$ with $U_i \in \mathbb{R}$. We introduce the notation for vector-valued approximation spaces $\pmb{\mathbb{V}}_h := \{\mathbb{V}_h\}^{d+2}$. Let $\mathbf{u}_h \in \pmb{\mathbb{V}}_h$; then it can be written as $\mathbf{u}_h = \sum_{i \in \mathcal{V}} \mathbf{U}_i \phi_i$ where $\mathbf{U}_i \in \mathbb{R}^{d+2}$ and $\phi_i$ is a scalar-valued shape function.

    Note
    We purposely refrain from using vector-valued finite element spaces in our notation. Vector-valued finite element spaces are natural for variational formulations of PDE systems (e.g. Navier-Stokes). In such context, the interactions that have to be computed describe interactions between DOFs: with proper renumbering of the vector-valued DoFHandler (i.e. initialized with an FESystem) it is possible to compute the block-matrices (required in order to advance the solution) with relative ease. However, the interactions that have to be computed in the context of time-explicit collocation-type schemes (such as finite differences and/or the scheme presented in this tutorial) can be better described as interactions between nodes (not between DOFs). In addition, in our case we do not solve a linear equation in order to advance the solution. This leaves very little reason to use vector-valued finite element spaces both in theory and/or practice.
We will use the usual Lagrange finite elements: let $\{\mathbf{x}_i\}_{i \in \mathcal{V}}$ denote the set of all support points (see this glossary entry), where $\mathbf{x}_i \in \mathbb{R}^d$. Then each index $i \in \mathcal{V}$ uniquely identifies a support point $\mathbf{x}_i$, as well as a scalar-valued shape function $\phi_i$. With this notation at hand we can define the (explicit time stepping) scheme as:

\begin{align*}
  m_i \frac{\mathbf{U}_i^{n+1} - \mathbf{U}_i^{n}}{\tau}
  + \sum_{j \in \mathcal{I}(i)} \mathbb{f}(\mathbf{U}_j^{n})\cdot
  \mathbf{c}_{ij} - \sum_{j \in \mathcal{I}(i)}
  d_{ij} \mathbf{U}_j^{n} = \boldsymbol{0} \, ,
\end{align*}

where

  • $m_i \dealcoloneq \int_{\Omega} \phi_i \, \mathrm{d}\mathbf{x}$ is the lumped mass matrix
  • $\tau$ is the time step size
  • $\mathbf{c}_{ij} \dealcoloneq \int_{\Omega} \nabla\phi_j\phi_i \, \mathrm{d}\mathbf{x}$ (note that $\mathbf{c}_{ij}\in \mathbb{R}^d$) is a vector-valued matrix that is used to approximate the divergence of the flux in a weak sense.
  • $\mathcal{I}(i) \dealcoloneq \{j \in \mathcal{V} \ | \ \mathbf{c}_{ij} \not \equiv \boldsymbol{0}\} \cup \{i\}$ is the adjacency list containing all degrees of freedom coupling to the index $i$. In other words, $\mathcal{I}(i)$ contains all nonzero column indices for row index $i$. $\mathcal{I}(i)$ will also be called a "stencil".
  • $\mathbb{f}(\mathbf{U}_j^{n})$ is the flux $\mathbb{f}$ of the hyperbolic system evaluated for the state $\mathbf{U}_j^{n}$ associated with support point $\mathbf{x}_j$.
  • $d_{ij} \dealcoloneq \max \{ \lambda_{\text{max}} (\mathbf{U}_i^{n},\mathbf{U}_j^{n}, \textbf{n}_{ij}), \lambda_{\text{max}} (\mathbf{U}_j^{n}, \mathbf{U}_i^{n}, \textbf{n}_{ji}) \} \|\mathbf{c}_{ij}\|$ if $i \not = j$ is the so-called graph viscosity. The graph viscosity serves as a stabilization term; it is somewhat the discrete counterpart of $\epsilon \Delta \mathbf{u}$ that appears in the notion of viscosity solution described above. We will base our construction of $d_{ij}$ on an estimate of the maximal local wavespeed $\lambda_{\text{max}}$ that will be explained in detail in a moment; one full update step of the scheme is sketched in code after this list.
  • the diagonal entries of the viscosity matrix are defined as $d_{ii} = - \sum_{j \in \mathcal{I}(i)\backslash \{i\}} d_{ij}$.
  • $\textbf{n}_{ij} = \frac{\mathbf{c}_{ij}}{ \|\mathbf{c}_{ij}\| }$ is a normalization of the $\textbf{c}_{ij}$ matrix that enters the approximate Riemann solver with which we compute the approximations $\lambda_{\text{max}}$ of the local wavespeed. (This will be explained further down below.)
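A minimal sketch of one forward-Euler update of this scheme, assuming the off-line data and the $d_{ij}$ values have been assembled elsewhere, and reusing the euler_flux() sketch from the step-67 part above (the two flux definitions coincide). The tutorial's actual implementation is, of course, parallelized and far more careful about data structures and memory access:

#include <deal.II/base/tensor.h>
#include <vector>

// Off-line data of the scheme: lumped masses m_i, stencils I(i), and
// the vector-valued matrix entries c_ij, stored per stencil entry.
template <int dim>
struct OfflineData
{
  std::vector<double>                              m;
  std::vector<std::vector<unsigned int>>           stencil;
  std::vector<std::vector<dealii::Tensor<1, dim>>> c;
};

// One forward-Euler step: U_i^{n+1} = U_i^n - tau/m_i
// (sum_j f(U_j) . c_ij - sum_j d_ij U_j), where d[i][k] holds d_ij
// for j = stencil[i][k].
template <int dim>
void update(std::vector<dealii::Tensor<1, dim + 2>> &U,
            const OfflineData<dim>                  &data,
            const std::vector<std::vector<double>>  &d,
            const double                             tau)
{
  const std::vector<dealii::Tensor<1, dim + 2>> U_old = U;
  for (unsigned int i = 0; i < U.size(); ++i)
    {
      dealii::Tensor<1, dim + 2> rhs;
      for (unsigned int k = 0; k < data.stencil[i].size(); ++k)
        {
          const unsigned int j = data.stencil[i][k];

          // Unpack U_j = (rho, m, E) and evaluate the flux f(U_j).
          const double           rho = U_old[j][0];
          dealii::Tensor<1, dim> mom;
          for (unsigned int c = 0; c < dim; ++c)
            mom[c] = U_old[j][1 + c];
          const double E = U_old[j][dim + 1];

          const auto flux = euler_flux<dim>(rho, mom, E);

          for (unsigned int c = 0; c < dim + 2; ++c)
            rhs[c] += flux[c] * data.c[i][k]; // f(U_j) . c_ij
          rhs -= d[i][k] * U_old[j];          // graph viscosity d_ij U_j
        }
      U[i] = U_old[i] - (tau / data.m[i]) * rhs;
    }
}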
The definition of $\lambda_{\text{max}} (\mathbf{U},\mathbf{V}, \textbf{n})$ is far from trivial and we will postpone the precise definition in order to focus first on some algorithmic and implementation questions. We note that

  • $m_i$ and $\mathbf{c}_{ij}$ do not evolve in time (provided we keep the discretization fixed). It thus makes sense to assemble these matrices/vectors once in a so-called offline computation and reuse them in every time step. They are part of what we are going to call off-line data.
  • At every time step we have to evaluate $\mathbb{f}(\mathbf{U}_j^{n})$ and $d_{ij} \dealcoloneq \max \{ \lambda_{\text{max}} (\mathbf{U}_i^{n},\mathbf{U}_j^{n}, \textbf{n}_{ij}), \lambda_{\text{max}} (\mathbf{U}_j^{n}, \mathbf{U}_i^{n}, \textbf{n}_{ji}) \} \|\mathbf{c}_{ij}\|$, which will constitute the bulk of the computational cost.

/usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html	2024-01-30 03:04:55.152903910 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html	2024-01-30 03:04:55.152903910 +0000
@@ -148,67 +148,67 @@

        Verification of correctness

        There has probably never been a non-trivial finite element program that worked right from the start. It is therefore necessary to find ways to verify whether a computed solution is correct or not. Usually, this is done by choosing the set-up of a simulation in such a way that we know the exact continuous solution and evaluate the difference between continuous and computed discrete solution. If this difference converges to zero with the right order of convergence, this is already a good indication of correctness, although there may be other sources of error persisting which have only a small contribution to the total error or are of higher order. In the context of finite element simulations, this technique of picking the solution by choosing appropriate right hand sides and boundary conditions is often called the Method of Manufactured Solution. (We will come back to how exactly we construct the solution in this method below, after discussing the equation we want to solve.)

        In this example, we will not go into the theories of systematic software verification which is a complicated problem in general. Rather we will demonstrate the tools which deal.II can offer in this respect. This is basically centered around the functionality of a single function, VectorTools::integrate_difference(). This function computes the difference between a given continuous function and a finite element field in various norms on each cell. Of course, like with any other integral, we can only evaluate these norms using quadrature formulas; the choice of the right quadrature formula is therefore crucial to the accurate evaluation of the error. This holds in particular for the $L_\infty$ norm, where we evaluate the maximal deviation of numerical and exact solution only at the quadrature points; one should then not try to use a quadrature rule whose evaluation occurs only at points where super-convergence might occur, such as the Gauss points of the lowest-order Gauss quadrature formula for which the integrals in the assembly of the matrix are correct (e.g., for linear elements, do not use the QGauss(2) quadrature formula). In fact, this is generally good advice also for the other norms: if your quadrature points are fortuitously chosen at locations where the error happens to be particularly small due to superconvergence, the computed error will look like it is much smaller than it really is and may even suggest a higher convergence order. Consequently, we will choose a different quadrature formula for the integration of these error norms than for the assembly of the linear system.
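        As a minimal sketch of such an error evaluation (the exact-solution object Solution<dim> and the quadrature order are assumptions for illustration, not necessarily the program's actual choices):

        Vector<float> difference_per_cell(triangulation.n_active_cells());
        VectorTools::integrate_difference(dof_handler,
                                          solution,
                                          Solution<dim>(),
                                          difference_per_cell,
                                          QGauss<dim>(fe->degree + 2),
                                          VectorTools::L2_norm);
        const double L2_error =
          VectorTools::compute_global_error(triangulation,
                                            difference_per_cell,
                                            VectorTools::L2_norm);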

        The function VectorTools::integrate_difference() evaluates the desired norm on each cell $K$ of the triangulation and returns a vector which holds these values for each cell. From the local values, we can then obtain the global error. For example, if the vector $\mathbf e$ with element $e_K$ for all cells $K$ contains the local $L_2$ norms $\|u-u_h\|_K$, then

        \[
   E = \| {\mathbf e} \| = \left( \sum_K e_K^2 \right)^{1/2}
\]

        is the global $L_2$ error $E=\|u-u_h\|_\Omega$.

        In the program, we will show how to evaluate and use these quantities, and we will monitor their values under mesh refinement. Of course, we have to choose the problem at hand such that we can explicitly state the solution and its derivatives, but since we want to evaluate the correctness of the program, this is only reasonable. If we know that the program produces the correct solution for one (or, if one wants to be really sure: many) specifically chosen right hand sides, we can be rather confident that it will also compute the correct solution for problems where we don't know the exact values.

        In addition to simply computing these quantities, we will show how to generate nicely formatted tables from the data generated by this program that automatically compute convergence rates etc. (a sketch using the ConvergenceTable class follows below). We will also compare different strategies for mesh refinement.
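        A minimal sketch of how such a table can be built with the ConvergenceTable class (the column names, the L2_error variable, and the output stream are assumptions for illustration):

        ConvergenceTable convergence_table;
        // After each refinement cycle, record mesh size and errors:
        convergence_table.add_value("cells", triangulation.n_active_cells());
        convergence_table.add_value("dofs", dof_handler.n_dofs());
        convergence_table.add_value("L2", L2_error);
        // Let the table compute reduction rates and write itself out,
        // both as plain text and as a LaTeX table:
        convergence_table.evaluate_convergence_rates(
          "L2", ConvergenceTable::reduction_rate_log2);
        convergence_table.write_text(std::cout);
        convergence_table.write_tex(tex_output_file);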

        Non-homogeneous Neumann boundary conditions

        The second, totally unrelated, subject of this example program is the use of non-homogeneous boundary conditions. These are included into the variational form using boundary integrals which we have to evaluate numerically when assembling the right hand side vector.

        Before we go into programming, let's have a brief look at the mathematical formulation. The equation that we want to solve here is the Helmholtz equation "with the nice sign":

        \[
   -\Delta u + \alpha u = f,
\]

        on the square $[-1,1]^2$ with $\alpha=1$, augmented by Dirichlet boundary conditions

        \[
   u = g_1
\]

        on some part $\Gamma_1$ of the boundary $\Gamma$, and Neumann conditions

        \[
   {\mathbf n}\cdot \nabla u = g_2
\]

        on the rest $\Gamma_2 = \Gamma \backslash \Gamma_1$. In our particular testcase, we will use $\Gamma_1=\Gamma \cap\{\{x=1\}
\cup \{y=1\}\}$. (We say that this equation has the "nice sign" because the operator $-\Delta + \alpha I$ with the identity $I$ and $\alpha>0$ is a positive definite operator; the equation with the "bad sign" is $-\Delta u - \alpha u$ and results from modeling time-harmonic processes. For the equation with the "bad sign", the operator $-\Delta-\alpha I$ is not positive definite if $\alpha>0$ is large, and this leads to all sorts of issues we need not discuss here. The operator may also not be invertible – i.e., the equation does not have a unique solution – if $\alpha$ happens to be one of the eigenvalues of $-\Delta$.)

        Using the above definitions, we can state the weak formulation of the equation, which reads: find $u\in H^1_g=\{v\in H^1: v|_{\Gamma_1}=g_1\}$ such that

        \[
   {(\nabla v, \nabla u)}_\Omega + {(v,u)}_\Omega
   =
   {(v,f)}_\Omega + {(v,g_2)}_{\Gamma_2}
\]

        for all test functions $v\in H^1_0=\{v\in H^1: v|_{\Gamma_1}=0\}$. The boundary term ${(v,g_2)}_{\Gamma_2}$ has appeared by integration by parts and using $\partial_n u=g_2$ on $\Gamma_2$ and $v=0$ on $\Gamma_1$. The cell matrices and vectors which we use to build the global matrices and right hand side vectors in the discrete formulation therefore look like this:

        \begin{eqnarray*}
   A_{ij}^K &=& \left(\nabla \varphi_i, \nabla \varphi_j\right)_K
               +\left(\varphi_i, \varphi_j\right)_K,
   \\
   F_i^K &=& \left(\varphi_i, f\right)_K
            +\left(\varphi_i, g_2\right)_{\partial K\cap \Gamma_2}.
\end{eqnarray*}

        Since the generation of the domain integrals has been shown in previous examples several times, only the generation of the contour integral is of interest here. It basically works along the following lines: for domain integrals we have the FEValues class that provides values and gradients of the shape functions, as well as Jacobian determinants and other information at specified quadrature points in the cell; likewise, there is a class FEFaceValues that performs these tasks for integrations on faces of cells. One provides it with a quadrature formula for a manifold with dimension one less than the dimension of the domain, and the cell and the number of its face on which we want to perform the integration. The class will then compute the values, gradients, normal vectors, weights, etc. at the quadrature points on this face, which we can then use in the same way as for the domain integrals. The details of how this is done are shown in the following program.
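        As a minimal sketch of such a face-integration loop (the boundary id of $\Gamma_2$, the g2 function object, and the cell_rhs vector are illustrative assumptions, not the program's actual names):

        QGauss<dim - 1>   face_quadrature(fe->degree + 1);
        FEFaceValues<dim> fe_face_values(*fe,
                                         face_quadrature,
                                         update_values | update_quadrature_points |
                                           update_JxW_values);

        for (const auto &face : cell->face_iterators())
          if (face->at_boundary() && (face->boundary_id() == 1)) // Gamma_2, assumed id
            {
              fe_face_values.reinit(cell, face);
              for (const unsigned int q : fe_face_values.quadrature_point_indices())
                for (const unsigned int i : fe_face_values.dof_indices())
                  cell_rhs(i) += fe_face_values.shape_value(i, q) *             // phi_i
                                 g2.value(fe_face_values.quadrature_point(q)) * // g_2
                                 fe_face_values.JxW(q);                         // dx
            }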

        The method of manufactured solutions

        Because we want to verify the convergence of our numerical solution $u_h$, we want a setup so that we know the exact solution $u$. This is where the Method of Manufactured Solutions comes in: Let us choose a function

        \[
   \bar u(\mathbf x) =
   \sum_{i=1}^3 \exp\left(-\frac{|\mathbf x-\mathbf x_i|^2}{\sigma^2}\right)
\]

        where the centers $x_i$ of the exponentials are $\mathbf x_1=(-\frac 12,\frac 12)$, $\mathbf x_2=(-\frac 12,-\frac 12)$, and $\mathbf x_3=(\frac 12,-\frac 12)$, and the half width is set to $\sigma=\frac {1}{8}$. The method of manufactured solution then says: choose

        \begin{align*}
   f &= -\Delta \bar u + \bar u, \\
   g_1 &= \bar u|_{\Gamma_1}, \\
   g_2 &= {\mathbf n}\cdot \nabla\bar u|_{\Gamma_2}.
\end{align*}

        With this particular choice for $f,g_1,g_2$, the solution of the original problem must necessarily be $u=\bar u$. In other words, by choosing the right hand sides of the equation and the boundary conditions in a particular way, we have manufactured ourselves a problem to which we know the solution – a very useful case given that in all but the very simplest cases, PDEs do not have solutions we can just write down. This then allows us to compute the error of our numerical solution. In the code below, we represent $\bar u$ by the Solution class, and other classes will be used to denote $\bar u|_{\Gamma_1}=g_1$ and ${\mathbf n}\cdot \nabla\bar u|_{\Gamma_2}=g_2$.

        Note
        In principle, you can choose whatever you want for the function $\bar u$ above – here we have simply chosen a sum of three exponentials. In practice, there are a few considerations you want to take into account: (i) The function must be simple enough so that you can compute derivatives of the function with not too much effort, for example in order to determine what $f = -\Delta \bar u + \bar u$ is. Since the derivative of an exponential is relatively straightforward to compute, the choice above satisfies this requirement, whereas a function of the kind $\bar u(\mathbf x) = \text{atan}\left(\|\mathbf x\|^{\|\mathbf x\|}\right)$ would have presented greater difficulties. (ii) You don't want $\bar u$ to be a polynomial of low degree. That is because if you choose the polynomial degree of your finite element sufficiently high, you can exactly represent this $\bar u$ with the numerical solution $u_h$, making the error zero regardless of how coarse or fine the mesh is. Verifying that this is so is a useful step, but it will not allow you to verify the correct order of convergence of $\|u-u_h\|$ as a function of the mesh size $h$ in the general case of arbitrary $f$. (iii) The typical finite element error estimates assume sufficiently smooth solutions, i.e., sufficiently smooth domains, right-hand sides $f$ and boundary conditions. As a consequence, you should choose a smooth solution $\bar u$ – for example, it shouldn't have kinks. (iv) You want a solution whose variations can be resolved on the meshes you consider to test convergence. For example, if you were to choose $\bar u(\mathbf x)=\sin(1000 x_1)\sin(1000 x_2)$, you shouldn't be surprised if you don't observe that the error decreases at the expected rate until your mesh is fine enough to actually resolve the high-frequency oscillations with substantially more than 1,000 mesh cells in each coordinate direction.

        The solution $\bar u$ we choose here satisfies all of these requirements: (i) It is relatively straightforward to differentiate; (ii) it is not a polynomial; (iii) it is smooth; and (iv) it has a length scale of $\sigma=\frac {1}{8}$ which, on the domain $[-1,1]^d$ is relatively straightforward to resolve with 16 or more cells in each coordinate direction.
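        A minimal 2d sketch of what such a Solution class could look like (the program's actual class also implements the gradient, which is needed for the $H^1$ error; the member names below are only illustrative):

        template <int dim>
        class Solution : public Function<dim>
        {
        public:
          virtual double value(const Point<dim>  &p,
                               const unsigned int /*component*/ = 0) const override
          {
            // Centers x_1, x_2, x_3 and half width sigma = 1/8 from the text
            // (the two-argument Point constructor assumes dim == 2):
            const Point<dim> centers[3] = {Point<dim>(-0.5, +0.5),
                                           Point<dim>(-0.5, -0.5),
                                           Point<dim>(+0.5, -0.5)};
            const double sigma = 0.125;

            double sum = 0;
            for (const auto &center : centers)
              sum += std::exp(-p.distance_square(center) / (sigma * sigma));
            return sum;
          }
        };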

        A note on good programming practice

        Besides the mathematical topics outlined above, we also want to use this program to illustrate one aspect of good programming practice, namely the use of namespaces. In programming the deal.II library, we have taken great care not to use names for classes and global functions that are overly generic, say f(), sz(), rhs() etc. Furthermore, we have put everything into namespace dealii. But when one writes application programs that aren't meant for others to use, one doesn't always pay this much attention. If you follow the programming style of step-1 through step-6, these functions then end up in the global namespace where, unfortunately, a lot of other stuff also lives (basically everything the C language provides, along with everything you get from the operating system through header files). To make things a bit worse, the designers of the C language were also not always careful in avoiding generic names; for example, the symbols j1, jn are defined in C header files (they denote Bessel functions).

        To avoid the problems that result if names of different functions or variables collide (often with confusing error messages), it is good practice to put everything you do into a namespace. Following this style, we will open a namespace Step7 at the top of the program, import the deal.II namespace into it, put everything that's specific to this program (with the exception of main(), which must be in the global namespace) into it, and only close it at the bottom of the file. In other words, the structure of the program is of the kind

        namespace Step7
        {
          using namespace dealii;

          // ...everything that is specific to this program...
        }

        int main()
        {
          // ...
        }
         

        Finally, we compute the maximum norm. Of course, we can't actually compute the true maximum of the error over all points in the domain, but only the maximum over a finite set of evaluation points that, for convenience, we will still call "quadrature points" and represent by an object of type Quadrature even though we do not actually perform any integration.

        There is then the question of what points precisely we want to evaluate at. It turns out that the result we get depends quite sensitively on the "quadrature" points being used. There is also the issue of superconvergence: Finite element solutions are, on some meshes and for polynomial degrees $k\ge 2$, particularly accurate at the node points as well as at Gauss-Lobatto points, much more accurate than at randomly chosen points. (See [Li2019] and the discussion and references in Section 1.2 for more information on this.) In other words, if we are interested in finding the largest difference $u(\mathbf x)-u_h(\mathbf x)$, then we ought to look at points $\mathbf x$ that are specifically not of this "special" kind of points and we should specifically not use QGauss(fe->degree+1) to define where we evaluate. Rather, we use a special quadrature rule that is obtained by iterating the trapezoidal rule by the degree of the finite element times two plus one in each space direction. Note that the constructor of the QIterated class takes a one-dimensional quadrature rule and a number that tells it how often it shall repeat this rule in each space direction.

        Using this special quadrature rule, we can then try to find the maximal error on each cell. Finally, we compute the global L infinity error from the L infinity errors on each cell with a call to VectorTools::compute_global_error.

          // Iterate the one-dimensional trapezoidal rule fe->degree * 2 + 1
          // times in each space direction, so that we evaluate away from the
          // superconvergent points:
          const QTrapezoid<1> q_trapez;
          const QIterated<dim> q_iterated(q_trapez, fe->degree * 2 + 1);
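          With these two objects, the maximum-norm error could then be evaluated as follows (a sketch; difference_per_cell and solution are assumed to be declared as in the earlier error-evaluation sketch):

          VectorTools::integrate_difference(dof_handler,
                                            solution,
                                            Solution<dim>(),
                                            difference_per_cell,
                                            q_iterated,
                                            VectorTools::Linfty_norm);
          const double Linfty_error =
            VectorTools::compute_global_error(triangulation,
                                              difference_per_cell,
                                              VectorTools::Linfty_norm);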

        One can see the error reduction upon grid refinement, and for the cases where global refinement was performed, also the convergence rates can be seen. The linear and quadratic convergence rates of Q1 and Q2 elements in the $H^1$ semi-norm can clearly be seen, as are the quadratic and cubic rates in the $L_2$ norm.

        Finally, the program also generated LaTeX versions of the tables (not shown here) that are written into a file in a way so that they could be copy-pasted into a LaTeX document.

        When is the error "small"?

        What we showed above is how to determine the size of the error $\|u-u_h\|$ in a number of different norms. We did this primarily because we were interested in testing that our solutions converge. But from an engineering perspective, the question is often more practical: How fine do I have to make my mesh so that the error is "small enough"? In other words, if in the table above the $H^1$ semi-norm has been reduced to 4.121e-03, is this good enough for me to sign the blueprint and declare that our numerical simulation showed that the bridge is strong enough?

        In practice, we are rarely in this situation because I can not typically compare the numerical solution $u_h$ against the exact solution $u$ in situations that matter – if I knew $u$, I would not have to compute $u_h$. But even if I could, the question to ask in general is then: 4.121e-03 what? The solution will have physical units, say kg-times-meter-squared, and I'm integrating a function with units square of the above over the domain, and then take the square root. So if the domain is two-dimensional, the units of $\|u-u_h\|_{L_2}$ are kg-times-meter-cubed. The question is then: Is $4.121\times 10^{-3}$ kg-times-meter-cubed small? That depends on what you're trying to simulate: If you're an astronomer used to masses measured in solar masses and distances in light years, then yes, this is a fantastically small number. But if you're doing atomic physics, then no: That's not small, and your error is most certainly not sufficiently small; you need a finer mesh.

        In other words, when we look at these sorts of numbers, we generally need to compare against a "scale". One way to do that is to not look at the absolute error $\|u-u_h\|$ in whatever norm, but at the relative error $\|u-u_h\|/\|u\|$. If this ratio is $10^{-5}$, then you know that on average, the difference between $u$ and $u_h$ is 0.001 per cent – probably small enough for engineering purposes.

        How do we compute $\|u\|$? We just need to do an integration loop over all cells, quadrature points on these cells, and then sum things up and take the square root at the end. But there is a simpler way often used: You can call

        Vector<double> zero_vector (dof_handler.n_dofs());
        Vector<float> norm_per_cell(triangulation.n_active_cells());
        VectorTools::integrate_difference (dof_handler,
                                           zero_vector,
                                           Solution<dim>(),
                                           norm_per_cell,
                                           QGauss<dim>(fe->degree + 1),
                                           VectorTools::L2_norm);

        which computes $\|u-0\|_{L_2}$. Alternatively, if you're particularly lazy and don't feel like creating the zero_vector, you could use the fact that if the mesh is not too coarse, then $\|u\| \approx \|u_h\|$, and we can compute $\|u\| \approx \|u_h\|=\|0-u_h\|$ by calling

        Vector<float> norm_per_cell(triangulation.n_active_cells());
        VectorTools::integrate_difference (dof_handler,
                                           solution,
                                           Functions::ZeroFunction<dim>(),
                                           norm_per_cell,
                                           QGauss<dim>(fe->degree + 1),
                                           VectorTools::L2_norm);
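        Either way, once one has the cellwise norms of the solution, the relative error is just the ratio of two numbers (a sketch with the assumed names L2_error and norm_per_cell from the snippets above):

        const double solution_norm =
          VectorTools::compute_global_error(triangulation,
                                            norm_per_cell,
                                            VectorTools::L2_norm);
        const double relative_error = L2_error / solution_norm;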

        Possibilities for extensions

        Higher Order Elements

        Go ahead and run the program with higher order elements ( $Q_3$, $Q_4$, ...). You will notice that assertions in several parts of the code will trigger (for example in the generation of the filename for the data output). You might have to address these, but it should not be very hard to get the program to work!

        Convergence Comparison

        Is $Q_1$ or $Q_2$ better? What about adaptive versus global refinement? A (somewhat unfair but typical) metric to compare them is to look at the error as a function of the number of unknowns.

        To see this, create a plot in log-log style with the number of unknowns on the $x$ axis and the $L_2$ error on the $y$ axis. You can add reference lines for $h^2=N^{-1}$ and $h^3=N^{-3/2}$ and check that global and adaptive refinement follow those. If one makes the (not completely unreasonable) assumption that with a good linear solver, the computational effort is proportional to the number of unknowns $N$, then it is clear that an error reduction of ${\cal O}(N^{-3/2})$ is substantially better than a reduction of the form ${\cal O}(N^{-1})$: That is, that adaptive refinement gives us the desired error level with less computational work than if we used global refinement. This is not a particularly surprising conclusion, but it's worth checking these sorts of assumptions in practice.

        Of course, a fairer comparison would be to plot runtime (switch to release mode first!) instead of number of unknowns on the $x$ axis. If you plotted run time (check out the Timer class!) against the number of unknowns by timing each refinement step, you will notice that the linear system solver we use in this program is not perfect – its run time grows faster than proportional to the linear system size – and picking a better linear solver might be appropriate for this kind of comparison.

        To see how a comparison of this kind could work, take a look at [KronbichlerWall2018], and specifically Figure 5 that illustrates the error as a function of compute time for a number of polynomial degrees (as well as a number of different ways to discretize the equation used there).

        The plain program

        /* ---------------------------------------------------------------------
/usr/share/doc/packages/dealii/doxygen/deal.II/step_70.html differs (HTML document, UTF-8 Unicode text, with very long lines)
        Note
        If you use this program as a basis for your own work, please consider citing it in your list of references. The initial version of this work was contributed to the deal.II project by the authors listed in the following citation: 10.5281/zenodo.3829064

        Introduction

        Massively parallel non-matching grid simulations of fluid structure interaction problems

        In this tutorial we consider a mixing problem in the laminar flow regime. Such problems occur in a wide range of applications ranging from chemical engineering to power generation (e.g. turbomachinery). Mixing problems are particularly hard to solve numerically, because they often involve a container (with fixed boundaries, and possibly complex geometries such as baffles), represented by the domain $\Omega$, and one (or more) immersed and rotating impellers (represented by the domain $\Omega^{\text{imp}}$). The domain in which we would like to solve the flow equations is the (time dependent) difference between the two domains, namely: $\Omega\setminus\Omega^{\text{imp}}$.

        For rotating impellers, the use of Arbitrary Lagrangian Eulerian formulations (in which the fluid domain – along with the mesh! – is smoothly deformed to follow the deformations of the immersed solid) is not possible, unless only small times (i.e., small fluid domain deformations) are considered. If one wants to track the evolution of the flow across multiple rotations of the impellers, the resulting deformed grid would simply be too distorted to be useful.

        In this case, a viable alternative strategy would be to use non-matching methods (similarly to what we have done in step-60), where a background fixed grid (that may or may not be locally refined in time to better capture the solid motion) is coupled with a rotating, independent, grid.

        In order to maintain the same notations used in step-60, we use $\Omega$ to denote the domain in ${\mathbb R}^{\text{spacedim}}$ representing the container of both the fluid and the impeller, and we use $\Gamma$ in ${\mathbb R}^{\text{dim}}$ to denote either the full impeller (when its spacedim measure is non-negligible, i.e., when we can represent it as a grid of dimension dim equal to spacedim), a co-dimension one representation of a thin impeller, or just the boundary of the full impeller.

        The domain $\Gamma$ is embedded in $\Omega$ ( $\Gamma \subseteq \Omega$) and it is non-matching: It does not, in general, align with any of the features of the volume mesh. We solve a partial differential equation on $\Omega$, enforcing some conditions on the solution of the problem on the embedded domain $\Gamma$ by some penalization techniques. In the current case, the condition is that the velocity of the fluid at points on $\Gamma$ equal the velocity of the solid impeller at that point.

        The technique we describe here is presented in the literature using one of many names: the immersed finite element method and the fictitious boundary method among others. The main principle is that the discretizations of the two grids are kept completely independent. In the present tutorial, this approach is used to solve for the motion of a viscous fluid, described by the Stokes equation, that is agitated by a rigid non-deformable impeller.

        Thus, the equations solved in $\Omega$ are the Stokes equations for a creeping flow (i.e. a flow where $\text{Re}\rightarrow 0$) and a no-slip boundary condition is applied on the moving embedded domain $\Gamma$ associated with the impeller. However, this tutorial could be readily extended to other equations (e.g. the Navier-Stokes equations, linear elasticity equation, etc.). It can be seen as a natural extension of step-60 that enables the solution of large problems using a distributed parallel computing architecture via MPI.

        However, contrary to step-60, the Dirichlet boundary conditions on $\Gamma$ are imposed weakly instead of through the use of Lagrange multipliers, and we concentrate on dealing with the coupling of two fully distributed triangulations (a combination that was not possible in the implementation of step-60).

        There are two interesting scenarios that occur when one wants to enforce conditions on the embedded domain $\Gamma$:

        • The geometrical dimension dim of the embedded domain $\Gamma$ is the same as that of the domain $\Omega$ (spacedim), that is, the spacedim-dimensional measure of $\Gamma$ is not zero. In this case, the imposition of the Dirichlet boundary condition on $\Gamma$ is done through a volumetric penalization. If the applied penalization only depends on the velocity, this is often referred to as $\mathcal{L}^2$ penalization whereas if the penalization depends on both the velocity and its gradient, it is an $\mathcal{H}^1$ penalization. The case of the $\mathcal{L}^2$ penalization is very similar to a Darcy-type approach. Both $\mathcal{L}^2$ and $\mathcal{H}^1$ penalizations have been analyzed extensively (see, for example, [Angot1999]).
        • The embedded domain $\Gamma$ has an intrinsic dimension dim which is smaller than that of $\Omega$ (spacedim), thus its spacedim-dimensional measure is zero; for example it is a curve embedded in a two dimensional domain, or a surface embedded in a three-dimensional domain. This is of course physically impossible, but one may consider very thin sheets of metal moving in a fluid as essentially lower-dimensional if the thickness of the sheet is negligible. In this case, the boundary condition is imposed weakly on $\Gamma$ by applying the Nitsche method (see [Freund1995]).

        Both approaches have very similar requirements and result in highly similar formulations. Thus, we treat them almost in the same way.

        In this tutorial program we are not interested in further details on $\Gamma$: we assume that the dimension of the embedded domain (dim) is always equal to, or one less than, the dimension of the embedding domain $\Omega$ (spacedim).

        We are going to solve the following differential problem: given a sufficiently regular function $g$ on $\Gamma$, find the solution $(\textbf{u},p)$ to

        \begin{eqnarray*}
   -\Delta \mathbf{u} + \nabla p &=& 0,\\
   -\nabla \cdot \textbf{u} &=& 0,\\
   \textbf{u} &=& \textbf{g}  \text{ in } \Gamma,\\
   \textbf{u} &=& 0 \text{ on } \partial\Omega.
\end{eqnarray*}

        This equation, which we have normalized by scaling the time units in such a way that the viscosity has a numerical value of 1, describes slow, viscous flow such as honey or lava. The main goal of this tutorial is to show how to impose the velocity field condition $\mathbf{u} = \mathbf{g}$ on a non-matching $\Gamma$ in a weak way, using a penalization method. A more extensive discussion of the Stokes problem including body forces, different boundary conditions, and solution strategies can be found in step-22.

        Let us start by considering the Stokes problem alone, in the entire domain $\Omega$. We look for a velocity field $\mathbf{u}$ and a pressure field $p$ that satisfy the Stokes equations with homogeneous boundary conditions on $\partial\Omega$.

        The weak form of the Stokes equations is obtained by first writing it in vector form as

        \begin{eqnarray*}
   \begin{pmatrix}
     {-\Delta \textbf{u} + \nabla p}
     \\
     {-\textrm{div}\;\textbf{u}}
   \end{pmatrix}
   =
   \begin{pmatrix}
   0
   \\
   0
   \end{pmatrix},
\end{eqnarray*}

        forming the dot product from the left with a vector-valued test function $\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$, and integrating over the domain $\Omega$, yielding the following set of equations:

        \begin{eqnarray*}
   (\mathrm v,
    -\Delta \textbf{u} + \nabla p)_{\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   0
\end{eqnarray*}

        which has to hold for all test functions $\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$.

        Integrating by parts and exploiting the boundary conditions on $\partial\Omega$, we obtain the following variational problem:

        \begin{eqnarray*}
 (\nabla \textbf{v}, \nabla \textbf{u})_{\Omega} - (\textrm{div}\; \textbf{v}, p)_{\Omega}
  - (q, \textrm{div}\; \textbf{u})_{\Omega}&=& 0
\end{eqnarray*}

        where $(\cdot, \cdot)_{\Omega}$ represents the $L^2$ scalar product. This is the same variational form used in step-22.

        This variational formulation does not take into account the embedded domain. Contrary to step-60, we do not enforce strongly the constraints of $\textbf{u}$ on $\Gamma$, but enforce them weakly via a penalization term.

        The analysis of this weak imposition of the boundary condition depends on the spacedim-dimensional measure of $\Gamma$ as either positive (if dim is equal to spacedim) or zero (if dim is smaller than spacedim). We discuss both scenarios.

        Co-dimension one case

        In this case, we assume that $\Gamma$ is the boundary of the actual impeller, that is, a closed curve embedded in a two-dimensional domain or a closed surface in a three-dimensional domain. The idea of this method starts by considering a weak imposition of the Dirichlet boundary condition on $\Gamma$, following the Nitsche method. This is achieved by using the following modified formulation on the fluid domain, where no strong conditions on the test functions on $\Gamma$ are imposed:

        \begin{multline*}
 (\nabla \textbf{v}, \nabla \textbf{u})_{\Omega\setminus\Omega^{\text{imp}}} - (\textrm{div}\;  \textbf{v}, p)_{\Omega\setminus\Omega^{\text{imp}}}
   - (q, \textrm{div}\; \textbf{u})_{\Omega\setminus\Omega^{\text{imp}}} \\
   - (\textbf{v},\nabla \textbf{u} \cdot \textbf{n})_{\Gamma}
   + (q, \textbf{u} \cdot \textbf{n})_{\Gamma} \\
   - (\nabla\textbf{v}\cdot \textbf{n},\textbf{u})_{\Gamma}
  + \beta (\textbf{v},\textbf{u})_{\Gamma} \\
 =  - (\nabla\textbf{v}\cdot \textbf{n},\textbf{g})_{\Gamma} + (q, \textbf{g} \cdot \textbf{n})_{\Gamma}
  + \beta (\textbf{v},\textbf{g})_{\Gamma}.
\end{multline*}

        The integrals over $\Gamma$ are lower-dimensional integrals. It can be shown (see [Freund1995]) that there exists a positive constant $C_1$ so that if $\beta > C_1$, the weak imposition of the boundary will be consistent and stable. The first two additional integrals on $\Gamma$ (the second line in the equation above) appear naturally after integrating by parts, when one does not assume that $\mathbf{v}$ is zero on $\Gamma$.

        The third line in the equation above contains two terms that are added to ensure consistency of the weak form, and a stabilization term, that is there to enforce the boundary condition with an error which is consistent with the approximation error. The consistency terms and the stabilization term are added to the right hand side with the actual boundary data $\mathbf{g}$.

        When $\mathbf{u}$ satisfies the condition $\mathbf{u}=\mathbf{g}$ on $\Gamma$, all the consistency and stability integrals on $\Gamma$ cancel out, and one is left with the usual weak form of Stokes flow, that is, the above formulation is consistent.

        We note that an alternative (non-symmetric) formulation can be used:

        \begin{multline*}
 (\nabla \textbf{v}, \nabla \textbf{u})_{\Omega\setminus\Omega^{\text{imp}}} -  (\textrm{div}\;  \textbf{v}, p)_{\Omega\setminus\Omega^{\text{imp}}}
   - (q, \textrm{div}\; \textbf{u})_{\Omega\setminus\Omega^{\text{imp}}} \\
   -(\textbf{v},\nabla \textbf{u} \cdot \textbf{n})_{\Gamma}
   - (q, \textbf{u} \cdot \textbf{n})_{\Gamma} \\
   + (\nabla\textbf{v}\cdot \textbf{n},\textbf{u})_{\Gamma}
  + \beta (\textbf{v},\textbf{u})_{\Gamma} \\
 =   (\nabla\textbf{v}\cdot \textbf{n},\textbf{g})_{\Gamma} - (q, \textbf{g} \cdot \textbf{n})_{\Gamma}
  + \beta (\textbf{v},\textbf{g})_{\Gamma}.
\end{multline*}

        Note the different sign of the first terms on the third and fourth lines. In this case, the stability and consistency conditions become $\beta > 0$. In the symmetric case, the value of $\beta$ is dependent on $h$, and it is in general chosen such that $\beta = C h^{-1} $ with $h$ a measure of size of the face being integrated and $C$ a constant such that $1 \leq C \leq 10$. This is as one usually does with the Nitsche penalty method when enforcing Dirichlet boundary conditions (see the sketch below).
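        A minimal sketch of this choice of the penalty parameter, with an assumed constant C = 2 and the cell diameter used as the measure h (both are illustrative assumptions, not the program's actual values):

        const double C    = 2.0;               // assumed constant, 1 <= C <= 10
        const double h    = cell->diameter();  // a measure of the local size
        const double beta = C / h;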

        The non-symmetric approach, on the other hand, is related to how one enforces continuity for the non-symmetric interior penalty method for discontinuous Galerkin methods (the "NIPG" method [Riviere1999]). Even if the non-symmetric case seems advantageous w.r.t. possible choices of stabilization parameters, we opt for the symmetric discretization, since in this case it can be shown that the dual problem is also consistent, leading to a solution where not only the energy norm of the solution converges with the correct order, but also its $L^2$ norm. Furthermore, the resulting matrix remains symmetric.

        The above formulation works under the assumption that the domain is discretized exactly. However, if the deformation of the impeller is a rigid body motion, it is possible to artificially extend the solution of the Stokes problem inside the propeller itself, since a rigid body motion is also a solution to the Stokes problem. The idea is then to solve the same problem, inside $\Omega^{\text{imp}}$, imposing the same boundary conditions on $\Gamma$, using the same penalization technique, and testing with test functions $\mathbf{v}$ which are globally continuous over $\Omega$.

        This results in the following (intermediate) formulation:

        \begin{multline*}
 (\nabla \textbf{v}, \nabla \textbf{u})_{\Omega} - (\textrm{div}\;  \textbf{v}, p)_{\Omega}
   - (q, \textrm{div}\; \textbf{u})_{\Omega} \\
   - (\textbf{v},  \lbrack \nabla \textbf{u} \rbrack \cdot \textbf{n})_{\Gamma}
   + (\lbrack q\rbrack, \textbf{u} \cdot \textbf{n})_{\Gamma} \\
   - (\lbrack \nabla\textbf{v}\rbrack \cdot \textbf{n},\textbf{u})_{\Gamma}
  + 2\beta (\textbf{v},\textbf{u})_{\Gamma} \\
 =  - (\lbrack \nabla\textbf{v}\rbrack\cdot \textbf{n},\textbf{g})_{\Gamma} + (\lbrack q\rbrack, \textbf{g} \cdot n)_{\Gamma}
  + 2\beta (\textbf{v},\textbf{g})_{\Gamma},
\end{multline*}

        where the jump terms, denoted with $\lbrack \cdot \rbrack$, are computed with respect to a fixed orientation of the normal vector $\textbf{n}$. The factor of 2 appears in front of $\beta$ since we see every part of $\Gamma$ twice, once from within the fluid and once from within the obstacle moving around in it. (For all of the other integrals over $\Gamma$, we visit each part of $\Gamma$ twice, but with opposite signs, and consequently get the jump terms.)

        Here we notice that, unlike in discontinuous Galerkin methods, the test and trial functions are continuous across $\Gamma$. Moreover, if $\Gamma$ is not aligned with cell boundaries, all the jump terms are also zero, since, in general, finite element function spaces are smooth inside each cell, and if $\Gamma$ cuts through an element intersecting its boundary only at a finite number of points, all the contributions on $\Gamma$, with the exception of the stabilization ones, can be neglected from the formulation, resulting in the following final form of the variational formulation:

        \begin{multline*}
 (\nabla \textbf{v}, \nabla \textbf{u})_{\Omega} - (\textrm{div}\;  \textbf{v}, p)_{\Omega}
   - (q, \textrm{div}\; \textbf{u})_{\Omega}  + 2\beta (\textbf{v},\textbf{u})_{\Gamma} \\
 =  2\beta (\textbf{v},\textbf{g})_{\Gamma}.
\end{multline*}

        In step-60, the imposition of the constraint required the addition of new variables in the form of Lagrange multipliers. This is not the case for this tutorial program. The imposition of the boundary condition using Nitsche's method only modifies the system matrix and the right-hand side without adding additional unknowns. However, the velocity vector $\textbf{u}$ on the embedded domain will not match exactly the prescribed velocity $\textbf{g}$, but only up to a numerical error which is of the same order as the interpolation error of the finite element method. Furthermore, as in step-60, we still need to integrate over the non-matching embedded grid in order to construct the boundary term necessary to impose the boundary condition over $\Gamma$ (a sketch of such a loop follows below).

        Co-dimension zero case

        In this case, $\Gamma$ has the same dimension, but is embedded into $\Omega$. We can think of this as a thick object moving around in the fluid. In the case of $\mathcal{L}^2$ penalization, the additional penalization term can be interpreted as a Darcy term within $\Gamma$, resulting in:

        \begin{eqnarray*}
 (\nabla \textbf{v}, \nabla \textbf{u})_{\Omega} - & (\textrm{div}\;  \textbf{v}, p)_{\Omega}
   - (q, \textrm{div}\; \textbf{u})_{\Omega}  + \beta (\textbf{v},\textbf{u})_{\Gamma}
 =  \beta (\textbf{v},\textbf{g})_{\Gamma}.
        \end{eqnarray*}

        Here, integrals over $\Gamma$ are simply integrals over a part of the volume. The $\mathcal{L}^2$ penalization thus consists in adding a volumetric term that constrains the velocity of the fluid to adhere to the velocity of the rigid body within $\Gamma$. Also in this case, $\beta$ must be chosen sufficiently large in order to ensure that the Dirichlet boundary condition in $\Gamma$ is sufficiently respected, but not too high in order to maintain the proper conditioning of the system matrix.

        A $\mathcal{H}^1$ penalization may be constructed in a similar manner, with the addition of a viscous component to the penalization that dampens the velocity gradient within $\Gamma$:

        \begin{eqnarray*}
 (\nabla \textbf{v}, \nabla \textbf{u})_{\Omega} - & (\textrm{div}\;  \textbf{v}, p)_{\Omega}
   - (q, \textrm{div}\; \textbf{u})_{\Omega}
   + \beta_1 (\textbf{v},\textbf{u})_{\Gamma}
   + \beta_2 (\nabla \textbf{v}, \nabla \textbf{u})_{\Gamma}
 =  \beta_1 (\textbf{v},\textbf{g})_{\Gamma}
 + \beta_2 (\nabla \textbf{v}, \nabla \textbf{g})_{\Gamma}.
        \end{eqnarray*}

        Notice that the $L^2$ penalization (dim equal to spacedim) and the Nitsche penalization (dim equal to spacedim-1) result in the exact same numerical implementation, thanks to the dimension independent capabilities of deal.II.
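        To see what this means in code, consider the following minimal sketch (an illustration written for this rewrite, not code taken from the tutorial): the local penalty block $\beta (\textbf{v},\textbf{u})_{\Gamma}$ is assembled from an FEValues<dim, spacedim> object, and the very same function template compiles both for dim == spacedim (the $L^2$ penalization) and for dim == spacedim-1 (the Nitsche penalization). Only scalar-valued shape functions are shown here; the Stokes case uses vector-valued components.

          #include <deal.II/fe/fe_values.h>
          #include <deal.II/lac/full_matrix.h>

          template <int dim, int spacedim>
          void assemble_penalty_block(const dealii::FEValues<dim, spacedim> &fe_values,
                                      const double                           beta,
                                      dealii::FullMatrix<double>            &cell_matrix)
          {
            const unsigned int dofs_per_cell = fe_values.get_fe().n_dofs_per_cell();
            // beta * (v_i, v_j) summed over the quadrature points of the current
            // cell; JxW(q) carries the measure of K, whatever its codimension.
            for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
              for (unsigned int i = 0; i < dofs_per_cell; ++i)
                for (unsigned int j = 0; j < dofs_per_cell; ++j)
                  cell_matrix(i, j) += beta * fe_values.shape_value(i, q) *
                                       fe_values.shape_value(j, q) * fe_values.JxW(q);
          }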

        Representation of Ω and Γ

        In this tutorial, both the embedded grid $\Gamma$ and the embedding grid are described using a parallel::distributed::Triangulation. These two triangulations can be built from functions in the GridGenerator namespace or by reading a mesh file produced with another application (e.g. GMSH, see the discussion in step-49). This is slightly more general than what was previously done in step-60.

        The addition of the immersed boundary method, whether it is in the dim=spacedim or dim<spacedim case, only introduces additional terms in the system matrix and the right-hand side of the system which result from the integration over $\Gamma$. This does not modify the number of variables for which the problem must be solved. The challenge is thus related to the integrals that must be carried over $\Gamma$.

        As usual in finite elements, we split this integral into contributions from all cells of the triangulation used to discretize $\Gamma$; we transform the integral on $K$ to an integral on the reference element $\hat K$, where $F_{K}$ is the mapping from $\hat K$ to $K$; and we compute the integral on $\hat K$ using a quadrature formula. For example:

        \[
 \beta (\textbf{v},\textbf{u})_{\Gamma} =  \sum_{K\in \Gamma} \int_{\hat K}
 \hat{\textbf{u}}(\hat x) (\textbf{v} \circ F_{K}) (\hat x) J_K (\hat x) \mathrm{d} \hat x =
 \sum_{K\in \Gamma} \sum_{i=1}^{n_q}  \big(\hat{\textbf{u}}(\hat x_i)  (\textbf{v} \circ F_{K}) (\hat x_i) J_K (\hat x_i) w_i \big)
        \]

        Computing this sum is non-trivial because we have to evaluate $(v_j \circ F_{K}) (\hat x_i)$. In general, if $\Gamma$ and $\Omega$ are not aligned, the point $y_i = F_{K}(\hat x_i)$ is completely arbitrary with respect to $\Omega$, and unless we figure out a way to interpolate all basis functions of $V_h(\Omega)$ on an arbitrary point on $\Omega$, we cannot compute the integral needed.

        To evaluate $(v_j \circ F_{K}) (\hat x_i)$ the following steps need to be taken (as shown in the picture below):
/usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html	2024-01-30 03:04:55.432906243 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html	2024-01-30 03:04:55.432906243 +0000
@@ -149,7 +149,7 @@
 This program was contributed by Jean-Paul Pelteret.

        Introduction

        The aim of this tutorial is, quite simply, to introduce the fundamentals of both automatic and symbolic differentiation (respectively abbreviated as AD and SD): Ways in which one can, in source code, describe a function $\mathbf f(\mathbf x)$ and automatically also obtain a representation of derivatives $\nabla \mathbf f(\mathbf x)$ (the "Jacobian"), $\nabla^2 \mathbf f(\mathbf x)$ (the "Hessian"), etc., without having to write additional lines of code. Doing this is quite helpful in solving nonlinear or optimization problems where one would like to only describe the nonlinear equation or the objective function in the code, without having to also provide their derivatives (which are necessary for a Newton method for solving a nonlinear problem, or for finding a minimizer).

        Since AD and SD tools are somewhat independent of finite elements and boundary value problems, this tutorial is going to be different to the others that you may have read beforehand. It will focus specifically on how these frameworks work and the principles and thinking behind them, and will forgo looking at them in the direct context of a finite element simulation.

        We will, in fact, look at two different sets of problems that have greatly different levels of complexity, but when framed properly hold sufficient similarity that the same AD and SD frameworks can be leveraged. With these examples the aim is to build up an understanding of the steps that are required to use the AD and SD tools, the differences between them, and hopefully identify where they could immediately be used in order to improve or simplify existing code.

        It's plausible that you're wondering what AD and SD are, in the first place. Well, that question is easy to answer but without context is not very insightful. So we're not going to cover that in this introduction, but will rather defer this until the first introductory example where we lay out the key points as this example unfolds. To complement this, we should mention that the core theory for both frameworks is extensively discussed in the Automatic and symbolic differentiation module, so there is little need to repeat it here.
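        That said, a tiny self-contained sketch can already convey the flavor of forward-mode AD before the examples unfold. The following is plain C++ written for this introduction (it is not the interface of any of the AD frameworks discussed later): a "dual number" carries a value together with its derivative, and every arithmetic operation propagates both.

          #include <cmath>
          #include <iostream>

          struct Dual
          {
            double v; // value f(x)
            double d; // derivative f'(x)
          };

          Dual operator+(const Dual a, const Dual b) { return {a.v + b.v, a.d + b.d}; }
          Dual operator*(const Dual a, const Dual b)
          {
            return {a.v * b.v, a.d * b.v + a.v * b.d}; // product rule
          }
          Dual sin(const Dual a) { return {std::sin(a.v), std::cos(a.v) * a.d}; }

          int main()
          {
            const Dual x{2.0, 1.0};        // seed dx/dx = 1 at x = 2
            const Dual f = x * x + sin(x); // f(x) = x^2 + sin(x)
            std::cout << "f(2)  = " << f.v << "\n"  // 4 + sin(2)
                      << "f'(2) = " << f.d << "\n"; // 4 + cos(2)
          }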

        @@ -166,7 +166,7 @@

        Thermodynamic principles

        As a prelude to introducing the coupled magneto-mechanical material law that we'll use to model a magneto-active polymer, we'll start with a very concise summary of the salient thermodynamics to which these constitutive laws must subscribe. The basis for the theory, as summarized here, is described in copious detail by Truesdell and Toupin [Truesdell1960a] and Coleman and Noll [Coleman1963a], and follows the logic laid out by Holzapfel [Holzapfel2007a].

        Starting from the first law of thermodynamics, and following a few technical assumptions, it can be shown that the balance between the kinetic plus internal energy rates and the power supplied to the system from external sources is given by the following relationship that equates the rate of change of the energy in an (arbitrary) volume $V$ on the left, and the sum of forces acting on that volume on the right:

        \[
   D_{t} \int\limits_{V} \left[
     \frac{1}{2} \rho_{0} \mathbf{v} \cdot \mathbf{v}
     + U^{*}_{0} \right] dV
@@ -178,31 +178,31 @@
   - D_{t} M^{*}_{0}
   - \nabla_{0} \cdot \mathbf{Q}
   + R_{0} \right] dV .
        \]

        Here $D_{t}$ represents the total time derivative, $\rho_{0}$ is the material density as measured in the Lagrangian reference frame, $\mathbf{v}$ is the material velocity and $\mathbf{a}$ its acceleration, $U^{*}_{0}$ is the internal energy per unit reference volume, $\mathbf{P}^{\text{tot}}$ is the total Piola stress tensor and $\dot{\mathbf{F}}$ is the time rate of the deformation gradient tensor, $\boldsymbol{\mathbb{H}}$ and $\boldsymbol{\mathbb{B}}$ are, respectively, the magnetic field vector and the magnetic induction (or magnetic flux density) vector, $\mathbb{E}$ and $\mathbb{D}$ are the electric field vector and electric displacement vector, and $\mathbf{Q}$ and $R_{0}$ represent the referential thermal flux vector and thermal source. The material differential operator $\nabla_{0} (\bullet) \dealcoloneq \frac{d(\bullet)}{d\mathbf{X}}$ where $\mathbf{X}$ is the material position vector. With some rearrangement of terms, invoking the arbitrariness of the integration volume $V$, the total internal energy density rate $\dot{E}_{0}$ can be identified as

        \[
   \dot{E}_{0}
 = \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
   + \mathbb{E} \cdot \dot{\mathbb{D}}
   - \nabla_{0} \cdot \mathbf{Q}
   + R_{0} .
        \]

        The total internal energy includes contributions that arise not only due to mechanical deformation (the first term), and thermal fluxes and sources (the fourth and fifth terms), but also due to the intrinsic energy stored in the magnetic and electric fields themselves (the second and third terms, respectively).

        The second law of thermodynamics, known also as the entropy inequality principle, informs us that certain thermodynamic processes are irreversible. After accounting for the total entropy and rate of entropy input, the Clausius-Duhem inequality can be derived. In local form (and in the material configuration), this reads

        \[
   \theta \dot{\eta}_{0}
   - R_{0}
   + \nabla_{0} \cdot \mathbf{Q}
   - \frac{1}{\theta} \nabla_{0} \theta \cdot \mathbf{Q}
   \geq 0 .
        \]

        The quantity $\theta$ is the absolute temperature, and $\eta_{0}$ represents the entropy per unit reference volume.

        Using this to replace $R_{0} - \nabla_{0} \cdot \mathbf{Q}$ in the result stemming from the first law of thermodynamics, we now have the relation

        \[
   \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
   + \mathbb{E} \cdot \dot{\mathbb{D}}
   + \theta \dot{\eta}_{0}
   - \dot{E}_{0}
   - \frac{1}{\theta} \nabla_{0} \theta \cdot \mathbf{Q}
   \geq 0 .
        \]

        On the basis of Fourier's law, which informs us that heat flows from regions of high temperature to low temperature, the last term is always positive and can be ignored. This renders the local dissipation inequality

        \[
   \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
   + \mathbb{E} \cdot \dot{\mathbb{D}}
   - \left[ \dot{E}_{0} - \theta \dot{\eta}_{0}  \right]
   \geq 0 .
        \]

        It is postulated [Holzapfel2007a] that the Legendre transformation

        \[
   \psi^{*}_{0}
 = \psi^{*}_{0} \left( \mathbf{F}, \boldsymbol{\mathbb{B}}, \mathbb{D}, \theta \right)
 = E_{0} - \theta \eta_{0} ,
        \]

        from which we may define the free energy density function $\psi^{*}_{0}$ with the stated parameterization, exists and is valid. Taking the material rate of this equation and substituting it into the local dissipation inequality results in the generic expression

        \[
   \mathcal{D}_{\text{int}}
   = \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
   + \mathbb{E} \cdot \dot{\mathbb{D}}
   - \dot{\theta} \eta_{0}
   - \dot{\psi}^{*}_{0} \left( \mathbf{F}, \boldsymbol{\mathbb{B}}, \mathbb{D}, \theta \right)
   \geq 0 .
        \]

        Under the assumption of isothermal conditions, and that the electric field does not excite the material in a manner that is considered non-negligible, then this dissipation inequality reduces to

        \[
   \mathcal{D}_{\text{int}}
   = \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
   - \dot{\psi}^{*}_{0} \left( \mathbf{F}, \boldsymbol{\mathbb{B}} \right)
   \geq 0 .
        \]

        Constitutive laws

        When considering materials that exhibit mechanically dissipative behavior, it can be shown that this can be captured within the dissipation inequality through the augmentation of the material free energy density function with additional parameters that represent internal variables [Holzapfel1996a]. Consequently, we write it as

        \[
   \mathcal{D}_{\text{int}}
   = \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
   - \dot{\psi}^{*}_{0} \left( \mathbf{F}, \mathbf{F}_{v}^{i}, \boldsymbol{\mathbb{B}} \right)
   \geq 0 .
        \]

        where $\mathbf{F}_{v}^{i} = \mathbf{F}_{v}^{i} \left( t \right)$ represents the internal variable (which acts like a measure of the deformation gradient) associated with the $i$th mechanical dissipative (viscous) mechanism. As can be inferred from its parameterization, each of these internal parameters is considered to evolve in time. Currently the free energy density function $\psi^{*}_{0}$ is parameterized in terms of the magnetic induction $\boldsymbol{\mathbb{B}}$. This is the natural parameterization that comes as a consequence of the considered balance laws. Should such a class of materials be incorporated within a finite-element model, a certain formulation of the magnetic problem, known as the magnetic vector potential formulation, would need to be adopted. This has its own set of challenges, so where possible the simpler magnetic scalar potential formulation may be preferred. In that case, the magnetic problem needs to be parameterized in terms of the magnetic field $\boldsymbol{\mathbb{H}}$. To make this re-parameterization, we execute a final Legendre transformation

        \[
   \tilde{\psi}_{0} \left( \mathbf{F}, \mathbf{F}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)
   = \psi^{*}_{0} \left( \mathbf{F}, \mathbf{F}_{v}^{i}, \boldsymbol{\mathbb{B}} \right)
   - \boldsymbol{\mathbb{H}} \cdot \boldsymbol{\mathbb{B}} .
        \]

        At the same time, we may take advantage of the principle of material frame indifference in order to express the energy density function in terms of symmetric deformation measures:

        \[
   \psi_{0} \left( \mathbf{C}, \mathbf{C}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)
   = \tilde{\psi}_{0} \left( \mathbf{F}, \mathbf{F}_{v}^{i}, \boldsymbol{\mathbb{H}} \right) .
        \]

        The upshot of these two transformations (leaving out considerable explicit and hidden details) renders the final expression for the reduced dissipation inequality as

        \[
   \mathcal{D}_{\text{int}}
   = \mathbf{S}^{\text{tot}} : \frac{1}{2} \dot{\mathbf{C}}
   - \boldsymbol{\mathbb{B}} \cdot \dot{\boldsymbol{\mathbb{H}}}
   - \dot{\psi}_{0} \left( \mathbf{C}, \mathbf{C}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)
   \geq 0 .
        \]

        (Notice the sign change on the second term on the right hand side, and the transfer of the time derivative to the magnetic induction vector.) The stress quantity $\mathbf{S}^{\text{tot}}$ is known as the total Piola-Kirchhoff stress tensor and its energy conjugate $\mathbf{C} = \mathbf{F}^{T} \cdot \mathbf{F}$ is the right Cauchy-Green deformation tensor, and $\mathbf{C}_{v}^{i} = \mathbf{C}_{v}^{i} \left( t \right)$ is the re-parameterized internal variable associated with the $i$th mechanical dissipative (viscous) mechanism.

        Expansion of the material rate of the energy density function, and rearrangement of the various terms, results in the expression

        \[
   \mathcal{D}_{\text{int}}
   = \left[ \mathbf{S}^{\text{tot}} - 2 \frac{\partial \psi_{0}}{\partial \mathbf{C}} \right] : \frac{1}{2} \dot{\mathbf{C}}
   - \sum\limits_{i}\left[ 2 \frac{\partial \psi_{0}}{\partial \mathbf{C}_{v}^{i}} \right] : \frac{1}{2} \dot{\mathbf{C}}_{v}^{i}
   + \left[ - \boldsymbol{\mathbb{B}} - \frac{\partial \psi_{0}}{\partial \boldsymbol{\mathbb{H}}} \right] \cdot \dot{\boldsymbol{\mathbb{H}}}
   \geq 0 .
        \]

        At this point, it's worth noting the use of the partial derivatives $\partial \left( \bullet \right)$. This is an important detail that will be fundamental to a certain design choice made within the tutorial. As a brief reminder of what this signifies, the partial derivative of a multi-variate function returns the derivative of that function with respect to one of those variables while holding the others constant:

        \[
   \frac{\partial f\left(x, y\right)}{\partial x}
   = \frac{d f\left(x, y\right)}{d x} \Big\vert_{y} .
        \]

        More specific to what's encoded in the dissipation inequality (with the very general free energy density function $\psi_{0}$ with its parameterization yet to be formalized), if one of the input variables is a function of another, it is also held constant and the chain rule does not propagate any further, while computing the total derivative would imply judicious use of the chain rule. This can be better understood by comparing the following two statements:

        \begin{align*}
   \frac{\partial f\left(x, y\left(x\right)\right)}{\partial x}
   &= \frac{d f\left(x, y\left(x\right)\right)}{d x} \Big\vert_{y} \\
   \frac{d f\left(x, y\left(x\right)\right)}{d x}
   &= \frac{d f\left(x, y\left(x\right)\right)}{d x} \Big\vert_{y}
    + \frac{d f\left(x, y\left(x\right)\right)}{d y} \Big\vert_{x} \frac{d y\left(x\right)}{d x} .
        \end{align*}
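        As a concrete (purely illustrative) check of this distinction, take $f(x,y) = x\,y^2$ with $y(x) = x^2$, so that $f(x,y(x)) = x^5$:

        \begin{align*}
   \frac{\partial f}{\partial x} \Big\vert_{y} &= y^2 = x^4 , \\
   \frac{d f}{d x} &= y^2 + 2 x y \, \frac{d y}{d x} = x^4 + 2x \cdot x^2 \cdot 2x = 5 x^4 ,
        \end{align*}

        and the total derivative indeed agrees with differentiating $x^5$ directly.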

        Returning to the thermodynamics of the problem, we next exploit the arbitrariness of the quantities $\dot{\mathbf{C}}$ and $\dot{\boldsymbol{\mathbb{H}}}$, by application of the Coleman-Noll procedure [Coleman1963a], [Coleman1967a]. This leads to the identification of the kinetic conjugate quantities

        \[
   \mathbf{S}^{\text{tot}}
   = \mathbf{S}^{\text{tot}} \left( \mathbf{C}, \mathbf{C}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)
   \dealcoloneq 2 \frac{\partial \psi_{0} \left( \mathbf{C}, \mathbf{C}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)}{\partial \mathbf{C}} , \\
   \boldsymbol{\mathbb{B}}
   = \boldsymbol{\mathbb{B}} \left( \mathbf{C}, \mathbf{C}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)
   \dealcoloneq - \frac{\partial \psi_{0} \left( \mathbf{C}, \mathbf{C}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)}{\partial \boldsymbol{\mathbb{H}}} .
        \]

/usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html	2024-01-30 03:04:55.504906843 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html	2024-01-30 03:04:55.504906843 +0000
@@ -142,48 +142,48 @@

        Introduction

        Motivation

        This program solves the same problem as step-15, that is, it solves for the minimal surface equation

        \begin{align*}
     F(u) \dealcoloneq -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right) &= 0 \qquad
     \qquad &&\textrm{in} ~ \Omega
     \\
     u&=g \qquad\qquad &&\textrm{on} ~ \partial \Omega.
        \end{align*}

        Among the issues we had identified there (see the Possibilities for extensions section) was that when wanting to use a Newton iteration, we needed to compute the derivative of the residual of the equation with regard to the solution $u$ (here, because the right hand side is zero, the residual is simply the left hand side). For the equation we have here, this is cumbersome but not impossible – but one can easily imagine much more complicated equations where just implementing the residual itself correctly is a challenge, let alone doing so for the derivative necessary to compute the Jacobian matrix. We will address this issue in this program: Using the automatic differentiation techniques discussed in great detail in step-71, we will come up with a way in which we only have to implement the residual and get the Jacobian for free.

        In fact, we can even go one step further. While in step-15 we have just taken the equation as a given, the minimal surface equation is actually the product of minimizing an energy. Specifically, the minimal surface equations are the Euler-Lagrange equations that correspond to minimizing the energy

        \[
     E(u) = \int_\Omega \Psi \left( u \right)
        \]

        where the energy density is given by

        \[
     \Psi \left( u \right) = \sqrt{1+|\nabla u|^{2}}.
        \]

        This is the same as saying that we seek to find the stationary point of the variation of the energy functional

        \[
     \min\limits_{u} E \left( u \right)
       \quad \rightarrow \quad
       \delta E \left( u, \varphi \right) \dealcoloneq
       \left(\varphi, F(u)\right) = 0
       \qquad
       \forall \varphi,
        \]

        as this is where the equilibrium solution to the boundary value problem lies.

        The key point then is that, maybe, we don't even need to implement the residual, but that implementing the simpler energy density $\Psi(u)$ might actually be enough.

        Our goal then is this: When using a Newton iteration, we need to repeatedly solve the linear partial differential equation

        \begin{align*}
     F'(u^{n},\delta u^{n}) &=- F(u^{n})
        \end{align*}

        so that we can compute the update

        \begin{align*}
     u^{n+1}&=u^{n}+\alpha^n \delta u^{n}
        \end{align*}

        with the solution $\delta u^{n}$ of the Newton step. As discussed in step-15, we can compute the derivative $F'(u,\delta u)$ by hand and obtain

        \[
   F'(u,\delta u)
   =
   - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u|^{2}\right)^{\frac{1}{2}}}\nabla
   \delta u \right) +
   \nabla \cdot \left( \frac{\nabla u \cdot
   \nabla \delta u}{\left(1+|\nabla u|^{2}\right)^{\frac{3}{2}}} \nabla u
   \right).
        \]

        So here then is what this program is about: It is about techniques that can help us with computing $F'(u,\delta u)$ without having to implement it explicitly, either by providing an implementation of $F(u)$ or an implementation of $E(u)$. More precisely, we will implement three different approaches and compare them in terms of run-time but also – maybe more importantly – how much human effort it takes to implement them:

          • The method used in step-15 to form the Jacobian matrix.
          • Computing the Jacobian matrix from an implementation of the residual $F(u)$, using automatic differentiation.
          • Computing both the residual and Jacobian matrix from an implementation of the energy functional $E(u)$, also using automatic differentiation.

          For the first of these methods, there are no conceptual changes compared to step-15.

          Computing the Jacobian from the residual

          For the second method, let us outline how we will approach the issue using automatic differentiation to compute the linearization of the residual vector. To this end, let us change notation for a moment and denote by $F(U)$ not the residual of the differential equation, but in fact the residual vector – i.e., the discrete residual. We do so because that is what we actually do when we discretize the problem on a given mesh: We solve the problem $F(U)=0$ where $U$ is the vector of unknowns.

          More precisely, the $i$th component of the residual is given by

        \[
   F(U)_i \dealcoloneq
   \int\limits_{\Omega}\nabla \varphi_i \cdot \left[ \frac{1}{\sqrt{1+|\nabla
   u|^{2}}} \nabla u \right] \, dV ,
        \]

          where $u(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)$. Given this, the contribution for cell $K$ is

        \[
   F(U)_i^K \dealcoloneq
   \int\limits_K\nabla \varphi_i \cdot \left[ \frac{1}{\sqrt{1+|\nabla
   u|^{2}}} \nabla u \right] \, dV ,
        \]

          Its first order Taylor expansion is given as

        \[
   F(U + \delta U)_i^K
   \approx F(U)_i^K
   + \sum_{j}^{n_{\textrm{dofs}}} \left[ \frac{\partial F(U)_i^K}{\partial
   U_j} \delta U_j \right],
        \]

          and consequently we can compute the contribution of cell $K$ to the Jacobian matrix $J$ as $J(U)_{ij}^K = \frac{\partial F(U)_i^K}{\partial U_j}$. The important point here is that on cell $K$, we can express

        \[
   F(U)_i^K \dealcoloneq
   \int\limits_K\nabla \varphi_i \cdot \left[ \frac{1}{\sqrt{1+\left|
   \sum_{j'}^{n_\textrm{dofs}} U_{j'} \nabla \varphi_{j'}\right|^{2}}}
   \left(\sum_{j''}^{n_\textrm{dofs}} U_{j''} \nabla \varphi_{j''}\right)\right] \, dV.
        \]

          For clarity, we have used $j'$ and $j''$ as counting indices to make clear that they are distinct from each other and from $j$ above. Because in this formula, $F(U)$ only depends on the coefficients $U_j$, we can compute the derivative $J(U)_{ij}^K$ as a matrix via automatic differentiation of $F(U)_i^K$. By the same argument as we always use, it is clear that $F(U)^K$ does not actually depend on all unknowns $U_j$, but only on those unknowns for which $j$ is a shape function that lives on cell $K$, and so in practice, we restrict $F(U)^K$ and $J(U)^K$ to that part of the vector and matrix that corresponds to the local DoF indices, and then distribute from the local cell $K$ to the global objects.

          Using all of these realizations, the approach will then be to implement $F(U)^K$ in the program and let the automatic differentiation machinery compute the derivatives $J(U)^K$ from that.
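          The following self-contained sketch (plain C++ written for this rewrite; the residual callable is hypothetical, and the tutorial's AD machinery computes the derivatives exactly rather than approximately) makes the relationship $J(U)^K_{ij} = \frac{\partial F(U)^K_i}{\partial U_j}$ tangible, and is also a handy way to spot-check an AD-assembled tangent:

          #include <functional>
          #include <vector>

          using Vec = std::vector<double>;
          using Mat = std::vector<std::vector<double>>;

          // Approximate J_ij = dF_i/dU_j by central differences around U.
          Mat numerical_tangent(const std::function<Vec(const Vec &)> &residual,
                                Vec U,
                                const double h = 1e-6)
          {
            const std::size_t n = U.size();
            Mat J(n, Vec(n, 0.0));
            for (std::size_t j = 0; j < n; ++j)
              {
                const double saved = U[j];
                U[j] = saved + h;
                const Vec Fp = residual(U); // F(U + h e_j)
                U[j] = saved - h;
                const Vec Fm = residual(U); // F(U - h e_j)
                U[j] = saved;
                for (std::size_t i = 0; i < n; ++i)
                  J[i][j] = (Fp[i] - Fm[i]) / (2.0 * h);
              }
            return J;
          }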

          Computing the Jacobian and the residual from the energy functional

          For the final implementation of the assembly process, we will move a level higher than the residual: our entire linear system will be determined directly from the energy functional that governs the physics of this boundary value problem. We can take advantage of the fact that we can calculate the total energy in the domain directly from the local contributions, i.e.,

        \[
   E \left( U \right) \dealcoloneq \int\limits_{\Omega} \Psi \left( u
   \right) \, dV .
        \]

          In the discrete setting, this means that on each finite element we have

        \[
    E \left( U \right)^K
     \dealcoloneq \int\limits_{K} \Psi \left( u \right) \, dV
     \approx \sum\limits_{q}^{n_{\textrm{q-points}}} \Psi \left( u \left(
     \mathbf{x}_{q} \right) \right) \underbrace{\vert J_{q} \vert \times W_{q}}_{\text{JxW(q)}} .
        \]

          If we implement the cell energy, which depends on the field solution, we can compute its first (discrete) variation

        \[
   F(U)^K_i
     = \frac{\partial E(U)^K}{\partial U_i}
        \]

          and, thereafter, its second (discrete) variation

        \[
   J(U)^K_{ij}
     = \frac{\partial^{2}  E(U)^K}{\partial U_i \partial U_j}.
        \]

          So, from the cell contribution to the total energy function, we may expect to have the approximate residual and tangent contributions generated for us as long as we can provide an implementation of the local energy $E(U)^K$. Again, due to the design of the automatic differentiation variables used in this tutorial, in practice these approximations for the contributions to the residual vector and tangent matrix are actually accurate to machine precision.
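          The same finite-difference device (again a plain C++ stand-in written for this rewrite, not the tutorial's AD code, which obtains these derivatives to machine precision) illustrates the first variation above; applying the stencil once more to each component of the result yields the second variation:

          #include <functional>
          #include <vector>

          using Vec = std::vector<double>;

          // Approximate F_i = dE/dU_i by central differences of a scalar energy.
          Vec numerical_residual(const std::function<double(const Vec &)> &energy,
                                 Vec U,
                                 const double h = 1e-5)
          {
            Vec F(U.size(), 0.0);
            for (std::size_t i = 0; i < U.size(); ++i)
              {
                const double saved = U[i];
                U[i] = saved + h;
                const double Ep = energy(U); // E(U + h e_i)
                U[i] = saved - h;
                const double Em = energy(U); // E(U - h e_i)
                U[i] = saved;
                F[i] = (Ep - Em) / (2.0 * h);
              }
            return F;
          }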

          The commented program

          The majority of this tutorial is an exact replica of step-15. So, in the interest of brevity and maintaining a focus on the changes implemented here, we will only document what's new and simply indicate which sections of code are a repetition of what has come before.

          Include files

          @@ -578,7 +578,7 @@
           
          void mesh_loop(const CellIteratorType &begin, const CellIteratorType &end, const CellWorkerFunctionType &cell_worker, const CopierType &copier, const ScratchData &sample_scratch_data, const CopyData &sample_copy_data, const AssembleFlags flags=assemble_own_cells, const BoundaryWorkerFunctionType &boundary_worker=BoundaryWorkerFunctionType(), const FaceWorkerFunctionType &face_worker=FaceWorkerFunctionType(), const unsigned int queue_length=2 *MultithreadInfo::n_threads(), const unsigned int chunk_size=8)
          Definition mesh_loop.h:282
        And finally, as is done in step-15, we remove hanging nodes from the system and apply zero boundary values to the linear system that defines the Newton updates $\delta u^n$.

          hanging_node_constraints.condense(system_matrix);
          hanging_node_constraints.condense(system_rhs);
         
        @@ -597,15 +597,15 @@
        void apply_boundary_values(const std::map< types::global_dof_index, number > &boundary_values, SparseMatrix< number > &matrix, Vector< number > &solution, Vector< number > &right_hand_side, const bool eliminate_columns=true)
        void interpolate_boundary_values(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > * > &function_map, std::map< types::global_dof_index, number > &boundary_values, const ComponentMask &component_mask=ComponentMask())

        Assembly via differentiation of the residual vector
        As outlined in the introduction, what we need to do for this second approach is implement the local contributions $F(U)^K$ from cell $K$ to the residual vector, and then let the AD machinery deal with how to compute the derivatives $J(U)_{ij}^K=\frac{\partial F(U)^K_i}{\partial U_j}$ from it.

        For the following, recall that

        \[
    F(U)_i^K \dealcoloneq
    \int\limits_K\nabla \varphi_i \cdot \left[ \frac{1}{\sqrt{1+|\nabla
    u|^{2}}} \nabla u \right] \, dV ,
        \]

        where $u(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)$.

        Let us see how this is implemented in practice:

          template <int dim>
          void MinimalSurfaceProblem<dim>::assemble_system_with_residual_linearization()
        @@ -651,12 +651,12 @@
/usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html	2024-01-30 03:04:55.576907443 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html	2024-01-30 03:04:55.576907443 +0000
@@ -144,28 +144,28 @@

        The equation

        In this example, we consider Poisson's equation

        \[
 - \nabla \cdot \left( \nu  \nabla u\right) = f  \qquad   \mbox{in } \Omega,
        \]

        subject to the boundary condition

        \[
 u = g_D \qquad \mbox{on } \partial \Omega.
        \]

        For simplicity, we assume that the diffusion coefficient $\nu$ is constant here. Note that if $\nu$ is discontinuous, we need to take this into account when computing jump terms on cell faces.

        We denote the mesh by ${\mathbb T}_h$, and $K\in{\mathbb T}_h$ is a mesh cell. The sets of interior and boundary faces are denoted by ${\mathbb F}^i_h$ and ${\mathbb F}^b_h$ respectively. Let $K^0$ and $K^1$ be the two cells sharing a face $f\in F_h^i$, and $\mathbf n$ be the outer normal vector of $K^0$. Then the jump operator is given by the "here minus there" formula,

        \[
 \jump{v} = v^0 - v^1
        \]

        and the averaging operator as

        \[
 \average{v} = \frac{v^0 + v^1}{2}
        \]

        respectively. Note that when $f\subset \partial \Omega$, we define $\jump{v} = v$ and $\average{v}=v$. The discretization using the SIPG is given by the following weak formula (more details can be found in [di2011mathematical] and the references therein)

        \begin{align*}
 &\sum_{K\in {\mathbb T}_h} (\nabla v_h, \nu \nabla u_h)_K\\
 &-\sum_{F \in F_h^i} \left\{
     \left< \jump{v_h}, \nu\average{ \nabla u_h} \cdot  \mathbf n \right>_F
@@ -181,52 +181,52 @@
   - \sum_{F \in F_h^b} \left\{
     \left< \nabla v_h \cdot \mathbf n, \nu g_D\right>_F - \left<v_h,\nu \sigma g_D\right>_F
   \right\}.
        \end{align*}

        The penalty parameter

        The penalty parameter is defined as $\sigma = \gamma/h_f$, where $h_f$ is a local length scale associated with the cell face; here we choose an approximation of the length of the cell in the direction normal to the face: $\frac 1{h_f} = \frac 12 \left(\frac 1{h_K} + \frac 1{h_{K'}}\right)$, where $K,K'$ are the two cells adjacent to the face $f$ and we compute $h_K = \frac{|K|}{|f|}$.

        In the formula above, $\gamma$ is the penalization constant. To ensure the discrete coercivity, the penalization constant has to be large enough [ainsworth2007posteriori]. People do not really have consensus on which of the formulas proposed in the literature should be used. (This is similar to the situation discussed in the "Results" section of step-47.) One can just pick a large constant, while other options could be the multiples of $(p+1)^2$ or $p(p+1)$. In this code, we follow step-39 and use $\gamma = p(p+1)$.
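        Put together, these definitions amount to a one-line helper. The following sketch is consistent with the get_penalty_factor() function that appears in the commented program below, though the tutorial's actual implementation may differ in detail:

          // sigma = gamma / h_f with gamma = p (p + 1) and
          // 1 / h_f = (1 / h_K + 1 / h_K') / 2, where h_K = |K| / |f|.
          double penalty_sigma(const unsigned int fe_degree,
                               const double       cell_extent_left,
                               const double       cell_extent_right)
          {
            const double gamma = fe_degree * (fe_degree + 1.0);
            return gamma * 0.5 * (1.0 / cell_extent_left + 1.0 / cell_extent_right);
          }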

        A posteriori error estimator

        In this example, with a slight modification, we use the error estimator by Karakashian and Pascal [karakashian2003posteriori]

        \[
 \eta^2 = \sum_{K \in {\mathbb T}_h} \eta^2_{K} +  \sum_{f_i \in {\mathbb F}^i_h}  \eta^2_{f_i} + \sum_{f_b \in {\mathbb F}^b_h}\eta^2_{f_b}
        \]

        where

        \begin{align*}
 \eta^2_{K} &= h_K^2 \left\| f + \nu \Delta u_h \right\|_K^2,
 \\
 \eta^2_{f_i} &= \sigma \left\| \jump{u_h}  \right\|_f^2   +  h_f \left\|  \jump{\nu \nabla u_h} \cdot \mathbf n   \right\|_f^2,
 \\
 \eta_{f_b}^2 &=  \sigma \left\| u_h-g_D \right\|_f^2.
        \end{align*}

        Here we use $\sigma = \gamma/h_f$ instead of $\gamma^2/h_f$ for the jump terms of $u_h$ (the first term in $\eta^2_{f_i}$ and $\eta_{f_b}^2$).

        In order to compute this estimator, in each cell $K$ we compute

        \begin{align*}
 \eta_{c}^2 &= h_K^2 \left\| f + \nu \Delta u_h \right\|_K^2,
 \\
 \eta_{f}^2 &= \sum_{f\in \partial K}\lbrace \sigma \left\| \jump{u_h}  \right\|_f^2   +  h_f \left\|  \jump{\nu \nabla u_h} \cdot \mathbf n  \right\|_f^2 \rbrace,
 \\
 \eta_{b}^2 &= \sum_{f\in \partial K \cap \partial \Omega}  \sigma \left\| (u_h -g_D)  \right\|_f^2.
        \end{align*}

        Then the square of the error estimate per cell is

        \[
 \eta_\text{local}^2 =\eta_{c}^2+0.5\eta_{f}^2+\eta_{b}^2.
        \]

        The factor of $0.5$ results from the fact that the overall error estimator includes each interior face only once, and so the estimators per cell count it with a factor of one half for each of the two adjacent cells. Note that we compute $\eta_\text{local}^2$ instead of $\eta_\text{local}$ to simplify the implementation. The error estimate square per cell is then stored in a global vector, whose $l_1$ norm is equal to $\eta^2$.
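        In code, the per-cell combination and the global estimate then reduce to the following sketch (plain vectors stand in for the tutorial's per-cell data structures):

          #include <cmath>
          #include <vector>

          // eta_local^2 = eta_c^2 + 0.5 eta_f^2 + eta_b^2 per cell; the global
          // eta^2 is the l1 norm of these per-cell squares.
          double global_estimator(const std::vector<double> &eta_c2,
                                  const std::vector<double> &eta_f2,
                                  const std::vector<double> &eta_b2)
          {
            double eta_square = 0.0;
            for (std::size_t K = 0; K < eta_c2.size(); ++K)
              eta_square += eta_c2[K] + 0.5 * eta_f2[K] + eta_b2[K];
            return std::sqrt(eta_square); // the estimator eta itself
          }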

        The test case

        In the first test problem, we run a convergence test using a smooth manufactured solution with $\nu =1$ in 2D

        \begin{align*}
 u&=\sin(2\pi x)\sin(2\pi y), &\qquad\qquad &(x,y)\in\Omega=(0,1)\times (0,1),
 \\
 u&=0,                        &\qquad\qquad &\text{on } \partial \Omega,
        \end{align*}

        and $f= 8\pi^2 u$. We compute errors against the manufactured solution and evaluate the convergence rate.
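        (As a quick consistency check of this right-hand side: with $\nu = 1$,

        \[
 f = -\nabla\cdot(\nu \nabla u) = -\Delta u = \left(4\pi^2 + 4\pi^2\right)\sin(2\pi x)\sin(2\pi y) = 8\pi^2 u .
        \])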

        In the second test, we choose Functions::LSingularityFunction on an L-shaped domain (GridGenerator::hyper_L) in 2D. The solution is given in the polar coordinates by $u(r,\phi) = r^{\frac{2}{3}}\sin \left(\frac{2}{3}\phi \right)$, which has a singularity at the origin. An error estimator is constructed to detect the region with large errors, according to which the mesh is refined adaptively.

        The commented program

        The first few files have already been covered in previous examples and will thus not be further commented on:

          #include <deal.II/base/quadrature_lib.h>
        @@ -359,7 +359,7 @@
         
         
         
        The right-hand side that corresponds to the function Functions::LSingularityFunction, where we assume that the diffusion coefficient $\nu = 1$:

          template <int dim>
          class SingularRightHandSide : public Function<dim>
          {
        @@ -393,7 +393,7 @@
        virtual double laplacian(const Point< 2 > &p, const unsigned int component=0) const override

        Auxiliary functions

        This function computes the penalty $\sigma$.

          double get_penalty_factor(const unsigned int fe_degree,
          const double cell_extent_left,
          const double cell_extent_right)
        @@ -492,7 +492,7 @@

        The remainder of the class's members are used for the following:

        • Vectors to store error estimator square and energy norm square per cell.
        • Print convergence rate and errors on the screen.
        • The diffusion coefficient $\nu$ is set to 1.
        • Members that store information about the test case to be computed.
          Vector<double> estimated_error_square_per_cell;
        @@ -802,7 +802,7 @@
        void initialize(const SparsityPattern &sparsity_pattern)
        std::string int_to_string(const unsigned int value, const unsigned int digits=numbers::invalid_unsigned_int)
        Definition utilities.cc:471

        The compute_error_estimate() function

        The assembly of the error estimator here is quite similar to that of the global matrix and right-hand side and can be handled by the MeshWorker::mesh_loop() framework. To understand what each of the local (lambda) functions is doing, recall first that the local cell residual is defined as $h_K^2 \left\| f + \nu \Delta u_h \right\|_K^2$:

          template <int dim>
          void SIPGLaplace<dim>::compute_error_estimate()
          {
        @@ -835,8 +835,8 @@
          };
         
Next compute boundary terms $\sum_{f\in \partial K \cap \partial \Omega} \sigma \left\| [ u_h-g_D ] \right\|_f^2$:

      const auto boundary_worker = [&](const auto & cell,
      const unsigned int &face_no,
      auto & scratch_data,
    @@ -866,9 +866,9 @@
      copy_data.value += penalty * difference_norm_square;
      };
     
And finally interior face terms $\sum_{f\in \partial K}\lbrace \sigma \left\| [u_h] \right\|_f^2 + h_f \left\| [\nu \nabla u_h \cdot \mathbf n ] \right\|_f^2 \rbrace$:

      const auto face_worker = [&](const auto & cell,
      const unsigned int &f,
      const unsigned int &sf,
    @@ -958,25 +958,25 @@

    The compute_energy_norm_error() function

    Next, we evaluate the accuracy in terms of the energy norm. This function is similar to the assembling of the error estimator above. Here we compute the square of the energy norm defined by

\[
   \|u \|_{1,h}^2 = \sum_{K \in \Gamma_h} \nu\|\nabla u \|_K^2 +
   \sum_{f \in F_i} \sigma \| [ u ] \|_f^2 +
   \sum_{f \in F_b} \sigma \|u\|_f^2.
\]

    Therefore the corresponding error is

\[
   \|u -u_h \|_{1,h}^2 = \sum_{K \in \Gamma_h} \nu\|\nabla (u_h - u) \|_K^2
   + \sum_{f \in F_i} \sigma \|[ u_h ] \|_f^2 + \sum_{f \in F_b}\sigma
   \|u_h-g_D\|_f^2.
\]

      template <int dim>
      double SIPGLaplace<dim>::compute_energy_norm_error()
      {
      energy_norm_square_per_cell.reinit(triangulation.n_active_cells());
     

    Assemble $\sum_{K \in \Gamma_h} \nu\|\nabla (u_h - u)  \|_K^2 $.

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html 2024-01-30 03:04:55.664908176 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html 2024-01-30 03:04:55.664908176 +0000 @@ -162,65 +162,65 @@

    hp-decision indicators

With hp-adaptive methods, we not only have to decide which cells we want to refine or coarsen, but we also have the choice of how we want to do that: either by adjusting the grid resolution or the polynomial degree of the finite element.

    We will again base the decision on which cells to adapt on (a posteriori) computed error estimates of the current solution, e.g., using the KellyErrorEstimator. We will similarly decide how to adapt with (a posteriori) computed smoothness estimates: large polynomial degrees work best on smooth parts of the solution while fine grid resolutions are favorable on irregular parts. In step-27, we presented a way to calculate smoothness estimates based on the decay of Fourier coefficients. Let us take here the opportunity and present an alternative that follows the same idea, but with Legendre coefficients.

We will briefly present the idea of this new technique, but limit its description to 1D for simplicity. Suppose $u_\text{hp}(x)$ is a finite element function defined on a cell $K$ as

\[
 u_\text{hp}(x) = \sum c_i \varphi_i(x)
\]

where each $\varphi_i(x)$ is a shape function. We can equivalently represent $u_\text{hp}(x)$ in the basis of Legendre polynomials $P_k$ as

\[
 u_\text{hp}(x) = \sum l_k P_k(x).
\]

Our goal is to obtain a mapping between the finite element coefficients $c_i$ and the Legendre coefficients $l_k$. We will accomplish this by writing the problem as an $L^2$-projection of $u_\text{hp}(x)$ onto the Legendre basis. Each coefficient $l_k$ can be calculated via

\[
 l_k = \int_K u_\text{hp}(x) P_k(x) dx.
\]

    By construction, the Legendre polynomials are orthogonal under the $L^2$-inner product on $K$. Additionally, we assume that they have been normalized, so their inner products can be written as

\[
 \int_K P_i(x) P_j(x) dx = \det(J_K) \, \delta_{ij}
\]

where $\delta_{ij}$ is the Kronecker delta, and $J_K$ is the Jacobian of the mapping from $\hat{K}$ to $K$, which (in this tutorial) is assumed to be constant (i.e., the mapping must be affine).

Hence, combining all these assumptions, the projection matrix for expressing $u_\text{hp}(x)$ in the Legendre basis is just $\det(J_K) \, \mathbb{I}$ – that is, $\det(J_K)$ times the identity matrix. Let $F_K$ be the mapping from the reference cell $\hat{K}$ to $K$. The entries in the right-hand side in the projection system are, therefore,

\[
 \int_K u_\text{hp}(x) P_k(x) dx
 = \det(J_K) \int_{\hat{K}} u_\text{hp}(F_K(\hat{x})) P_k(F_K(\hat{x})) d\hat{x}.
\]

Recalling the shape function representation of $u_\text{hp}(x)$, we can write this as $\det(J_K) \, \mathbf{C} \, \mathbf{c}$, where $\mathbf{C}$ is the change-of-basis matrix with entries

\[
 \int_K P_i(x) \varphi_j(x) dx
 = \det(J_K) \int_{\hat{K}} P_i(F_K(\hat{x})) \varphi_j(F_K(\hat{x})) d\hat{x}
 = \det(J_K) \int_{\hat{K}} \hat{P}_i(\hat{x}) \hat{\varphi}_j(\hat{x}) d\hat{x}
 \dealcoloneq \det(J_K) \, C_{ij}
\]

so the values of $\mathbf{C}$ can be written independently of $K$ by factoring $\det(J_K)$ out front after transforming to reference coordinates. Hence, putting it all together, the projection problem can be written as

\[
 \det(J_K) \, \mathbb{I} \, \mathbf{l} = \det(J_K) \, \mathbf{C} \, \mathbf{c}
\]

    which can be rewritten as simply

\[
 \mathbf{l} = \mathbf{C} \, \mathbf{c}.
\]

At this point, we need to emphasize that most finite element applications use unstructured meshes for which mapping is almost always non-affine. Put another way: the assumption that $J_K$ is constant across the cell is not true for general meshes. Hence, a correct calculation of $l_k$ requires not only that we calculate the corresponding transformation matrix $\mathbf{C}$ for every single cell, but that we also define a set of Legendre-like orthogonal functions on a cell $K$ which may have an arbitrary and very complex geometry. The second part, in particular, is very computationally expensive. The current implementation of the FESeries transformation classes relies on the simplification resulting from having a constant Jacobian to increase performance and thus only yields correct results for affine mappings. The transformation is only used for the purpose of smoothness estimation to decide on the type of adaptation, which is not a critical component of a finite element program. Apart from that, this circumstance does not pose a problem for this tutorial as we only use square-shaped cells.

    Eibner and Melenk [eibner2007hp] argued that a function is analytic, i.e., representable by a power series, if and only if the absolute values of the Legendre coefficients decay exponentially with increasing index $k$:

\[
 \exists C,\sigma > 0 : \quad \forall k \in \mathbb{N}_0 : \quad |l_k|
 \leq C \exp\left( - \sigma k \right) .
\]

The rate of decay $\sigma$ can be interpreted as a measure for the smoothness of that function. We can get it as the slope of a linear regression fit of the transformation coefficients:

\[
 \ln(|l_k|) \sim \ln(C) - \sigma k .
\]

We will perform this fit on each cell $K$ to get a local estimate for the smoothness of the finite element approximation. The decay rate $\sigma_K$ then acts as the decision indicator for hp-adaptation. For a finite element on a cell $K$ with a polynomial degree $p$, calculating the coefficients for $k \leq (p+1)$ proved to be a reasonable choice to estimate smoothness. You can find a more detailed and dimension-independent description in [fehling2020].
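To make the procedure concrete, here is a small, self-contained sketch in plain C++17 (deliberately not using deal.II's FESeries classes; it assumes the standard, non-normalized Legendre polynomials provided by std::legendre). It computes the first few coefficients of a smooth sample function on the reference interval $[-1,1]$ by quadrature and then estimates $\sigma$ from a least-squares fit of $\ln|l_k|$:

  #include <cmath>
  #include <cstdio>
  #include <vector>

  int main()
  {
    // A smooth sample function on [-1,1]; its Legendre coefficients
    // should decay (at least) exponentially.
    const auto u = [](const double x) { return std::exp(x); };

    // l_k = (2k+1)/2 * int_{-1}^{1} u(x) P_k(x) dx, by the midpoint rule
    const unsigned int  n_coefficients = 6;
    const unsigned int  n_q            = 2000;
    std::vector<double> l(n_coefficients, 0.);
    for (unsigned int q = 0; q < n_q; ++q)
      {
        const double x = -1. + (q + 0.5) * (2. / n_q);
        for (unsigned int k = 0; k < n_coefficients; ++k)
          l[k] += (2. * k + 1.) / 2. * u(x) * std::legendre(k, x) * (2. / n_q);
      }

    // Fit ln|l_k| ~ ln(C) - sigma*k; minus the slope estimates the decay
    // rate sigma that serves as the smoothness indicator.
    double sk = 0., sy = 0., skk = 0., sky = 0.;
    for (unsigned int k = 0; k < n_coefficients; ++k)
      {
        const double y = std::log(std::abs(l[k]));
        sk += k;
        sy += y;
        skk += 1. * k * k;
        sky += k * y;
      }
    const double slope =
      (n_coefficients * sky - sk * sy) / (n_coefficients * skk - sk * sk);
    std::printf("estimated decay rate sigma = %f\n", -slope);
  }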

    All of the above is already implemented in the FESeries::Legendre class and the SmoothnessEstimator::Legendre namespace. With the error estimates and smoothness indicators, we are then left to flag the cells for actual refinement and coarsening. Some functions from the parallel::distributed::GridRefinement and hp::Refinement namespaces will help us with that later.

    Hybrid geometric multigrid

    Finite element matrices are typically very sparse. Additionally, hp-adaptive methods correspond to matrices with highly variable numbers of nonzero entries per row. Some state-of-the-art preconditioners, like the algebraic multigrid (AMG) ones as used in step-40, behave poorly in these circumstances.

    @@ -229,18 +229,18 @@

    The test case

    For elliptic equations, each reentrant corner typically invokes a singularity [brenner2008]. We can use this circumstance to put our hp-decision algorithms to a test: on all cells to be adapted, we would prefer a fine grid near the singularity, and a high polynomial degree otherwise.

As the simplest elliptic problem to solve under these conditions, we chose the Laplace equation in an L-shaped domain with the reentrant corner in the origin of the coordinate system.

To be able to determine the actual error, we manufacture a boundary value problem with a known solution. On the above mentioned domain, one solution to the Laplace equation is, in polar coordinates $(r, \varphi)$:

\[
 u_\text{sol} = r^{2/3} \sin(2/3 \varphi).
\]

    See also [brenner2008] or [mitchell2014hp]. The solution looks as follows:

    Analytic solution.

    The singularity becomes obvious by investigating the solution's gradient in the vicinity of the reentrant corner, i.e., the origin

\[
 \left\| \nabla u_\text{sol} \right\|_{2} = 2/3 r^{-1/3} , \quad
 \lim\limits_{r \rightarrow 0} \left\| \nabla u_\text{sol} \right\|_{2} =
 \infty .
\]
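A quick numerical check (plain C++, not part of the tutorial) makes the blow-up rate tangible:

  #include <cmath>
  #include <cstdio>

  int main()
  {
    // ||grad u_sol||_2 = 2/3 * r^{-1/3} grows without bound as r -> 0
    for (double r = 1e-1; r >= 1e-7; r /= 10.)
      std::printf("r = %8.1e   |grad u_sol| = %f\n",
                  r, 2. / 3. * std::pow(r, -1. / 3.));
  }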

    As we know where the singularity will be located, we expect that our hp-decision algorithm decides for a fine grid resolution in this particular region, and high polynomial degree anywhere else.

    So let's see if that is actually the case, and how hp-adaptation performs compared to pure h-adaptation. But first let us have a detailed look at the actual code.

    @@ -1125,7 +1125,7 @@
     

    The next part is going to be tricky. During execution of refinement, a few hp-algorithms need to interfere with the actual refinement process on the Triangulation object. We do this by connecting several functions to Triangulation::Signals: signals will be called at different stages during the actual refinement process and trigger all connected functions. We require this functionality for load balancing and to limit the polynomial degrees of neighboring cells.

For the former, we would like to assign a weight to every cell that is proportional to the number of degrees of freedom of its future finite element. The library offers a class parallel::CellWeights that allows us to easily attach individual weights at the right place during the refinement process, i.e., after all refine and coarsen flags have been set correctly for hp-adaptation and right before repartitioning for load balancing is about to happen. Functions can be registered that will attach weights of the form $a (n_\text{dofs})^b$ with a provided pair of parameters $(a,b)$. We register such a function in the following.

    For load balancing, efficient solvers like the one we use should scale linearly with the number of degrees of freedom owned. We set the parameters for cell weighting correspondingly: A weighting factor of $1$ and an exponent of $1$ (see the definitions of the weighting_factor and weighting_exponent above).

  cell_weights = std::make_unique<parallel::CellWeights<dim>>(
    dof_handler,
    parallel::CellWeights<dim>::ndofs_weighting( // completion sketch: the diff cuts off here
      {weighting_factor, weighting_exponent}));
    @@ -1644,7 +1644,7 @@

    The deal.II library offers multiple strategies to decide which type of adaptation to impose on cells: either adjust the grid resolution or change the polynomial degree. We only presented the Legendre coefficient decay strategy in this tutorial, while step-27 demonstrated the Fourier equivalent of the same idea.

See the "possibilities for extensions" section of step-27 for an overview of these strategies, or the corresponding documentation for a detailed description.

    There, another strategy is mentioned that has not been shown in any tutorial so far: the strategy based on refinement history. The usage of this method for parallel distributed applications is more tricky than the others, so we will highlight the challenges that come along with it. We need information about the final state of refinement flags, and we need to transfer the solution across refined meshes. For the former, we need to attach the hp::Refinement::predict_error() function to the Triangulation::Signals::post_p4est_refinement signal in a way that it will be called after the hp::Refinement::limit_p_level_difference() function. At this stage, all refinement flags and future FE indices are terminally set and a reliable prediction of the error is possible. The predicted error then needs to be transferred across refined meshes with the aid of parallel::distributed::CellDataTransfer.
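Structurally (a sketch under the assumptions just described, with hypothetical vector names estimated_error_per_cell and predicted_errors; consult the documentation of hp::Refinement::predict_error() for the exact signature and the additional tuning parameters), the connection could look like:

  triangulation.signals.post_p4est_refinement.connect([&]() {
    // must run after limit_p_level_difference(), so that all refinement
    // flags and future FE indices are final when the error is predicted
    hp::Refinement::limit_p_level_difference(dof_handler);
    hp::Refinement::predict_error(dof_handler,
                                  estimated_error_per_cell,
                                  predicted_errors);
  });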

Try implementing one of these strategies into this tutorial and observe the subtle changes to the results. You will notice that all strategies are capable of identifying the singularities near the reentrant corners and will perform $h$-refinement in these regions, while preferring $p$-refinement in the bulk domain. A detailed comparison of these strategies is presented in [fehling2020].

    Solve with matrix-based methods

This tutorial focuses solely on matrix-free strategies. All hp-adaptive algorithms, however, also work with matrix-based approaches in the parallel distributed context.

    To create a system matrix, you can either use the LaplaceOperator::get_system_matrix() function, or use an assemble_system() function similar to the one of step-27. You can then pass the system matrix to the solver as usual.

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html 2024-01-30 03:04:55.768909042 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html 2024-01-30 03:04:55.768909042 +0000 @@ -366,7 +366,7 @@
    dst,
    src);

    VectorizedArrayType

The class VectorizedArray<Number> is a key component to achieve the high node-level performance of the matrix-free algorithms in deal.II. It is a wrapper class around a short vector of $n$ entries of type Number and maps arithmetic operations to appropriate single-instruction/multiple-data (SIMD) concepts by intrinsic functions. The length of the vector can be queried by VectorizedArray::size() and its underlying number type by VectorizedArray::value_type.
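As a small, self-contained illustration (not taken from the tutorial; it only assumes the header deal.II/base/vectorization.h), the wrapper can be used much like a scalar:

  #include <deal.II/base/vectorization.h>
  #include <iostream>

  int main()
  {
    using VA = dealii::VectorizedArray<double>;
    std::cout << "SIMD lanes: " << VA::size() << std::endl;

    VA a = 1.5, b = 2.0;      // scalars are broadcast to all lanes
    const VA c = a * b + 3.0; // arithmetic maps to SIMD intrinsics
    for (unsigned int lane = 0; lane < VA::size(); ++lane)
      std::cout << c[lane] << ' ';
    std::cout << std::endl;
  }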

In the default case (VectorizedArray<Number>), the vector length is set at compile time of the library to match the highest value supported by the given processor architecture. However, a second optional template argument can also be specified as VectorizedArray<Number, size>, where size explicitly controls the vector length within the capabilities of a particular instruction set. A full list of supported vector lengths is presented in the following table:

    @@ -2084,7 +2084,7 @@

    Possibilities for extensions

    The algorithms are easily extendable to higher dimensions: a high-dimensional advection operator based on cell-centric loops is part of the hyper.deal library. An extension of cell-centric loops to locally-refined meshes is more involved.

    Extension to the compressible Navier-Stokes equations

The solver presented in this tutorial program can also be extended to the compressible Navier–Stokes equations by adding viscous terms, as also suggested in step-67. To keep as much as possible of the performance obtained here despite the additional cost of elliptic terms, e.g. via an interior penalty method, that tutorial has proposed to switch the basis from FE_DGQ to FE_DGQHermite like in the step-59 tutorial program. The reasoning behind this switch is that in the case of FE_DGQ all values of neighboring cells (i.e., $k+1$ layers) are needed, whilst in the case of FE_DGQHermite only 2 layers, making the latter significantly more suitable for higher degrees. The additional layers have to be, on the one hand, loaded from main memory during flux computation and, on the other hand, have to be communicated. Using the shared-memory capabilities introduced in this tutorial, the second point can be eliminated on a single compute node or its influence can be reduced in a hybrid context.

    Block Gauss-Seidel-like preconditioners

Cell-centric loops could be used to create block Gauss-Seidel preconditioners that are multiplicative within one process and additive over processes. In contrast to Jacobi-type preconditioners, these preconditioners use already-updated values from neighboring cells during flux computation. The following pseudo-code visualizes how this could in principle be achieved:

    // vector monitor if cells have been updated or not
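    // (continuation sketch -- the diff cuts the listing off here; names
    // such as n_cells and update_cell() are placeholders, not library API)
    std::vector<bool> updated(n_cells, false);

    // cell-centric sweep over all locally owned cells
    for (unsigned int cell = 0; cell < n_cells; ++cell)
      {
        // during flux computation, read the new value from neighbors that
        // have already been updated in this sweep (multiplicative,
        // Gauss-Seidel-like); fall back to the old value otherwise
        // (additive/Jacobi-like, as across process boundaries)
        update_cell(cell, updated);
        updated[cell] = true;
      }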
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html 2024-01-30 03:04:55.836909609 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html 2024-01-30 03:04:55.836909609 +0000 @@ -137,68 +137,68 @@

    Introduction

The step-15 program solved the following nonlinear equation describing the minimal surface problem:

\begin{align*}
    -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right) &= 0 \qquad
    \qquad &&\textrm{in} ~ \Omega
    \\
    u&=g \qquad\qquad &&\textrm{on} ~ \partial \Omega.
\end{align*}

step-15 uses a Newton method, and Newton's method works by repeatedly solving a linearized problem for an update $\delta u_k$ – called the "search direction" –, computing a "step length" $\alpha_k$, and then combining them to compute the new guess for the solution via

\begin{align*}
    u_{k+1} = u_k + \alpha_k \, \delta u_k.
\end{align*}

In the course of the discussions in step-15, we found that it is awkward to compute the step length, and so just settled for a simple choice: Always choose $\alpha_k=0.1$. This is of course not efficient: We know that we can only realize Newton's quadratic convergence rate if we eventually are able to choose $\alpha_k=1$, though we may have to choose it smaller for the first few iterations where we are still too far away to use this long a step length.

    Among the goals of this program is therefore to address this shortcoming. Since line search algorithms are not entirely trivial to implement, one does as one should do anyway: Import complicated functionality from an external library. To this end, we will make use of the interfaces deal.II has to one of the big nonlinear solver packages, namely the KINSOL sub-package of the SUNDIALS suite. SUNDIALS is, at its heart, a package meant to solve complex ordinary differential equations (ODEs) and differential-algebraic equations (DAEs), and the deal.II interfaces allow for this via the classes in the SUNDIALS namespace: Notably the SUNDIALS::ARKode and SUNDIALS::IDA classes. But, because that is an important step in the solution of ODEs and DAEs with implicit methods, SUNDIALS also has a solver for nonlinear problems called KINSOL, and deal.II has an interface to it in the form of the SUNDIALS::KINSOL class. This is what we will use for the solution of our problem.

    But SUNDIALS isn't just a convenient way for us to avoid writing a line search algorithm. In general, the solution of nonlinear problems is quite expensive, and one typically wants to save as much compute time as possible. One way one can achieve this is as follows: The algorithm in step-15 discretizes the problem and then in every iteration solves a linear system of the form

\begin{align*}
  J_k \, \delta U_k = -F_k
\end{align*}

where $F_k$ is the residual vector computed using the current vector of nodal values $U_k$, $J_k$ is its derivative (called the "Jacobian"), and $\delta U_k$ is the update vector that corresponds to the function $\delta u_k$ mentioned above. The construction of $J_k,F_k$ has been thoroughly discussed in step-15, as has the way to solve the linear system in each Newton iteration. So let us focus on another aspect of the nonlinear solution procedure: Computing $F_k$ is expensive, and assembling the matrix $J_k$ even more so. Do we actually need to do that in every iteration? It turns out that in many applications, this is not actually necessary: These methods often converge even if we replace $J_k$ by an approximation $\tilde J_k$ and solve

\begin{align*}
  \tilde J_k \, \widetilde{\delta U}_k = -F_k
\end{align*}

    instead, then update

\begin{align*}
    U_{k+1} = U_k + \alpha_k \, \widetilde{\delta U}_k.
\end{align*}

This may require an iteration or two more because our update $\widetilde{\delta U}_k$ is not quite as good as $\delta U_k$, but it may still be a win because we don't have to assemble $J_k$ quite as often.

What kind of approximation $\tilde J_k$ would we like for $J_k$? Theory says that as $U_k$ converges to the exact solution $U^\ast$, we need to ensure that $\tilde J_k$ converges to $J^\ast = \nabla F(U^\ast)$. In particular, since $J_k\rightarrow J^\ast$, a valid choice is $\tilde J_k = J_k$. But so is choosing $\tilde J_k = J_k$ every, say, fifth iteration $k=0,5,10,\ldots$ and for the other iterations, we choose $\tilde J_k$ equal to the last computed $J_{k'}$. This is what we will do here: we will just re-use $\tilde J_{k-1}$ from the previous iteration, which may again be what we had used in the iteration before that, $\tilde J_{k-2}$.

This scheme becomes even more interesting if, for the solution of the linear system with $J_k$, we don't just have to assemble a matrix, but also compute a good preconditioner. For example, if we were to use a sparse LU decomposition via the SparseDirectUMFPACK class, or used a geometric or algebraic multigrid. In those cases, we would also not have to update the preconditioner, whose computation may have taken about as long or longer than the assembly of the matrix in the first place. Indeed, with this mindset, we should probably think about using the best preconditioner we can think of, even though their construction is typically quite expensive: We will hope to amortize the cost of computing this preconditioner by applying it to more than just one linear solve.

The big question is, of course: By what criterion do we decide whether we can get away with the approximation $\tilde J_k$ based on a previously computed Jacobian matrix $J_{k-s}$ that goes back $s$ steps, or whether we need to – at least in this iteration – actually re-compute the Jacobian $J_k$ and the corresponding preconditioner? This is, like the issue with line search, one that requires a non-trivial amount of code that monitors the convergence of the overall algorithm. We could implement these sorts of things ourselves, but we probably shouldn't: KINSOL already does that for us. It will tell our code when to "update" the Jacobian matrix.

One last consideration if we were to use an iterative solver instead of the sparse direct one mentioned above: Not only is it possible to get away with replacing $J_k$ by some approximation $\tilde J_k$ when solving for the update $\delta U_k$, but one can also ask whether it is necessary to solve the linear system

\begin{align*}
  \tilde J_k \widetilde{\delta U}_k = -F_k
\end{align*}

to high accuracy. The thinking goes like this: While our current solution $U_k$ is still far away from $U^\ast$, why would we solve this linear system particularly accurately? The update $U_{k+1}=U_k + \widetilde{\delta U}_k$ is likely still going to be far away from the exact solution, so why spend much time on solving the linear system to great accuracy? This is the kind of thinking that underlies algorithms such as the "Eisenstat-Walker trick" [eiwa96] in which one is given a tolerance to which the linear system above in iteration $k$ has to be solved, with this tolerance dependent on the progress in the overall nonlinear solver. As before, one could try to implement this oneself, but KINSOL already provides this kind of information for us – though we will not use it in this program since we use a direct solver that requires no solver tolerance and just solves the linear system exactly up to round-off.

    As a summary of all of these considerations, we could say the following: There is no need to reinvent the wheel. Just like deal.II provides a vast amount of finite-element functionality, SUNDIALS' KINSOL package provides a vast amount of nonlinear solver functionality, and we better use it.

    Note
    While this program uses SUNDIAL's KINSOL package as the engine to solve nonlinear problems, KINSOL is not the only option you have. deal.II also has interfaces to PETSc's SNES collection of algorithms (see the PETScWrappers::NonlinearSolver class) as well as to the Trilinos NOX package (see the TrilinosWrappers::NOXSolver class) that provide not only very similar functionality, but also a largely identical interface. If you have installed a version of deal.II that is configured to use either PETSc or Trilinos, but not SUNDIALS, then it is not too difficult to switch this program to use either of the former two packages instead: Basically everything that we say and do below will also be true and work for these other packages! (We will also come back to this point in the results section below.)

    How deal.II interfaces with KINSOL

    KINSOL, like many similar packages, works in a pretty abstract way. At its core, it sees a nonlinear problem of the form

\begin{align*}
    F(U) = 0
\end{align*}


    and constructs a sequence of iterates $U_k$ which, in general, are vectors of the same length as the vector returned by the function $F$. To do this, there are a few things it needs from the user:
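Before looking at those pieces individually, a structural sketch of how such a solver is wired up may help. The callbacks residual, setup_jacobian, solve_with_jacobian, and reinit_vector are members of SUNDIALS::KINSOL; compute_residual() and the two factorization helpers are hypothetical stand-ins for the program's own routines (the exact callback signatures are documented in the SUNDIALS::KINSOL class):

  SUNDIALS::KINSOL<Vector<double>>::AdditionalData additional_data;
  additional_data.function_tolerance = 1e-8;
  SUNDIALS::KINSOL<Vector<double>> nonlinear_solver(additional_data);

  nonlinear_solver.reinit_vector = [&](Vector<double> &x) {
    x.reinit(dof_handler.n_dofs());
  };
  nonlinear_solver.residual =
    [&](const Vector<double> &evaluation_point, Vector<double> &residual) {
      compute_residual(evaluation_point, residual);
    };
  nonlinear_solver.setup_jacobian =
    [&](const Vector<double> &current_u, const Vector<double> &) {
      assemble_and_factorize_jacobian(current_u);
    };
  nonlinear_solver.solve_with_jacobian =
    [&](const Vector<double> &rhs, Vector<double> &dst, const double) {
      solve_with_factorized_jacobian(rhs, dst);
    };

  nonlinear_solver.solve(current_solution);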

    Computing the residual vector

The second part of what assemble_system() used to do in step-15 is computing the residual vector, i.e., the right hand side vector of the Newton linear systems. We have broken this out of the previous function, but the following function will be easy to understand if you understood what assemble_system() in step-15 did. Importantly, however, we need to compute the residual not linearized around the current solution vector, but whatever we get from KINSOL. This is necessary for operations such as line search where we want to know what the residual $F(U^k + \alpha_k \delta U^k)$ is for different values of $\alpha_k$; KINSOL in those cases simply gives us the argument to the function $F$ and we then compute the residual $F(\cdot)$ at this point.

The function prints the norm of the so-computed residual at the end as a way for us to follow the progress of the program.

      template <int dim>
      void MinimalSurfaceProblem<dim>::compute_residual(
    @@ -657,7 +657,7 @@

    The run() function and the overall logic of the program

    The only function that really is interesting in this program is the one that drives the overall algorithm of starting on a coarse mesh, doing some mesh refinement cycles, and on each mesh using KINSOL to find the solution of the nonlinear algebraic equation we obtain from discretization on this mesh. The refine_mesh() function above makes sure that the solution on one mesh is used as the starting guess on the next mesh. We also use a TimerOutput object to measure how much time every operation on each mesh costs, and reset the timer at the beginning of each cycle.

As discussed in the introduction, it is not necessary to solve problems on coarse meshes particularly accurately since these will only serve as starting guesses for the next mesh. As a consequence, we will use a target tolerance of $\tau=10^{-3} \frac{1}{10^k}$ for the $k$th mesh refinement cycle.

    All of this is encoded in the first part of this function:

      template <int dim>
      void MinimalSurfaceProblem<dim>::run()
    @@ -891,9 +891,9 @@
    ...

    What is happening is this:

    The program also writes the solution to a VTU file at the end of each mesh refinement cycle, and it looks as follows:

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html 2024-01-30 03:04:55.892910076 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html 2024-01-30 03:04:55.892910076 +0000 @@ -128,9 +128,9 @@

    Introduction

The Black-Scholes equation is a partial differential equation that falls a bit out of the ordinary scheme. It describes what the fair price of a "European call" stock option is. Without going into too much detail, a stock "option" is a contract one can buy from a bank that allows me, but does not require me, to buy a specific stock at a fixed price $K$ at a fixed time $T$ in the future. The question one would then want to answer as a buyer of such an option is "How much do I think such a contract is worth?", or as the seller "How much do I need to charge for this contract?", both as a function of the time $t<T$ before the contract is up at time $T$ and as a function of the stock price $S$. Fischer Black and Myron Scholes derived a partial differential equation for the fair price $V(S,t)$ for such options under the assumption that stock prices exhibit random price fluctuations with a given level of "volatility" plus a background exponential price increase (which one can think of as the inflation rate that simply devalues all money over time). For their work, Black and Scholes received the Nobel Prize in Economic Sciences in 1997, making this the first tutorial program dealing with a problem for which someone has gotten a Nobel Prize [black1973pricing].

    The equation reads as follows:

\begin{align*}
    &\frac{\partial V}{\partial t} + \frac{\sigma^2S^2}{2} \
    \frac{\partial^2 V}{\partial S^2} + \
    rS\frac{\partial V}{\partial S} - rV = 0, \
@@ -144,35 +144,35 @@
    \\
    &V(S,T) = \max(S-K,0) \
    &&\forall S \in \Omega
\end{align*}

    where

\begin{align*}
    V(S,t): && \text{Value of call option at time t and asset price S} \\
    \sigma: && \text{Volatility of the underlying asset} \\
    r: && \text{Risk free interest rate} \\
    K : && \text{Strike price for purchasing asset}
\end{align*}

The way we should interpret this equation is that it is a time-dependent partial differential equation of one "space" variable $S$ as the price of the stock, and $V(S,t)$ is the price of the option at time $t$ if the stock price at that time were $S$.

    Particularities of the equation system

There are a number of oddities in this equation that are worth discussing before moving on to its numerical solution. First, the "spatial" domain $\Omega\subset\mathbb{R}$ is unbounded, and thus $S$ can be unbounded in value. This is because there may be a practical upper bound for stock prices, but not a conceptual one. The boundary conditions $V(S,t)\rightarrow S$ as $S\rightarrow \infty$ can then be interpreted as follows: What is the value of an option that allows me to buy a stock at price $K$ if the stock price (today or at time $t=T$) is $S\gg K$? One would expect that it is $V\approx S-K$ plus some adjustment for inflation, or, if we really truly consider huge values of $S$, we can neglect $K$ and arrive at the statement that the boundary values at the infinite boundary should be of the form $V\rightarrow S$ as stated above.

In practice, for us to use a finite element method to solve this, we are going to need to bound $\Omega$. Since this equation describes prices, and it doesn't make sense to talk about prices being negative, we will set the lower bound of $\Omega$ to be 0. Then, for an upper bound, we will choose a very large number, one that $S$ is not very likely to ever get to. We will call this $S_\text{max}$. So, $\Omega=[0,S_\text{max}]$.

Second, after truncating the domain, we need to ask what boundary values we should pose at this now finite boundary. To take care of this, we use "put-call" parity [stoll1969relationship]. A "put option" is one in which we are allowed, but not required, to sell a stock at price $K$ to someone at a future time $T$. This says

\begin{align*}
    V(S,t)+Ke^{-r(T-t)}=P(S,t)+S
\end{align*}

where $V(S,t)$ is the value of the call option, and $P(S,t)$ is the value of the put option. Since we expect $P(S,t) \rightarrow 0$ as $S \rightarrow \infty$, this says

\begin{align*}
    V(S,t) \rightarrow S-Ke^{-r(T-t)},
\end{align*}

and we can use this as a reasonable boundary condition at our finite point $S_\text{max}$.

The second complication of the Black-Scholes equation is that we are given a final condition, and not an initial condition. This is because we know what the option is worth at time $t=T$: If the stock price at $T$ is $S<K$, then we have no incentive to use our option of buying at price $K$ because we can buy that stock for cheaper on the open market. So $V(S,T)=0$ for $S<K$. On the other hand, if at time $T$ we have $S>K$, then we can buy the stock at price $K$ via the option and immediately sell it again on the market for price $S$, giving me a profit of $S-K$. In other words, $V(S,T)=S-K$ for $S>K$. So, we only know values for $V$ at the end time but not the initial time – in fact, finding out what a fair price at the current time (conventionally taken to be $t=0$) is what solving these equations is all about.

This means that this is not an equation that is posed going forward in time, but in fact going backward in time. Thus it makes sense to solve this problem in reverse by making the change of variables $\tau=T-t$ where now $\tau$ denotes the time before the strike time $T$.

    With all of this, we finally end up with the following problem:

\begin{align*}
    &-\frac{\partial V}{\partial \tau} + \frac{\sigma^2S^2}{2} \
    \frac{\partial^2 V}{\partial S^2} + rS\frac{\partial V}{\partial S} - rV=0\
    , \quad\quad &&\forall S\in [0,S_\text{max}], \tau \in [0,T]
@@ -185,23 +185,23 @@
    \\
    &V(S,0) = \max(S-K,0) \
    &&\forall S \in [0,S_\text{max}]
\end{align*}

Conceptually, this is an advection-diffusion-reaction problem for the variable $V$: There is a second-order derivative diffusion term, a first-order derivative advection term, and a zeroth-order reaction term. We can expect this problem to be a little bit forgiving in practice because for realistic values of the coefficients, it is diffusion dominated. But, because of the advective terms in the problem, we will have to be careful with mesh refinement and time step choice. There is also the issue that the diffusion term is written in a non-conservative form and so integration by parts is not immediately obvious. This will be discussed in the next section.

    Scheme for the numerical solution

We will solve this problem using an IMEX method. In particular, we first discretize in time with the theta method and will later pick different values of theta for the advective and diffusive terms. Let $V^n(S)$ approximate $V(S,\tau_n)$:

\begin{align*}
    0=&-\frac{V^n(S)-V^{n-1}(S)}{k_n} \\
    &+\frac{\sigma^2S^2}{2}\left[(1-\theta)\frac{d^2V^{n-1}(S)}{dS^2} + \
    \theta \frac{d^2V^{n}(S)}{dS^2}\right] \\
    &+rS\left[(1-\theta)\frac{dV^{n-1}(S)}{dS} + \
    \theta\frac{dV^{n}(S)}{dS}\right]  \\
    &-r\left[(1-\theta)V^{n-1}(S) + \theta V^n(S)\right]
\end{align*}

Here, $k_n=\tau_n-\tau_{n-1}$ is the time step size. Given this time discretization, we can proceed to discretize space by multiplying with test functions and then integrating by parts. Because there are some interesting details in this due to the advective and non-advective terms in this equation, this process will be explained in detail.

So, we begin by multiplying by test functions, $\{\phi_i(S)\}_{i\in\mathbb{N}}$:

\begin{align*}
     0=&-\int_0^{S_\text{max}}\phi_i(S)\left[V^n(S)-V^{n-1}(S)\right]dS \\
     &+k_n\int_0^{S_\text{max}}\phi_i(S)\left[\frac{\sigma^2S^2}{2} \
     \left[(1-\theta)\frac{d^2V^{n-1}(S)}{dS^2} + \
@@ -211,12 +211,12 @@
      + \theta\frac{dV^{n}(S)}{dS}\right]\right]dS  \\
     &-k_n\int_0^{S_\text{max}}\phi_i(S)\left[r\left[(1-\theta)V^{n-1}(S)\
      + \theta V^n(S)\right]\right]dS
\end{align*}

    -

    As usual, (1) becomes $-\textbf{M}V^n+\textbf{M}V^{n-1}$ and (4) becomes $k_n\left[-r(1-\theta)\textbf{M}V^{n-1} - \theta r\textbf{M}V^n\right]$, where $\textbf{M}_{i,j}=\left(\phi_i(S),\phi_j(S)\right)$, and where we have taken the liberty of denoting by $V$ not only the function $V(S)$ but also the vector of nodal values after discretization.


    The interesting parts come from (2) and (3).

    For (2), we have:

\begin{align*}
     &k_n\int_0^{S_\text{max}}\phi_i(S)\left[\frac{\sigma^2S^2}{2} \
      \left[(1-\theta)\frac{d^2V^{n-1}(S)}{dS^2} + \
      \theta \frac{d^2V^{n}(S)}{dS^2}\right]\right]dS \\
     &= k_n(1-\theta)\int_0^{S_\text{max}}\phi_i(S)\frac{\sigma^2S^2}{2} \
      \frac{d^2V^{n-1}(S)}{dS^2} \
     +k_n\theta\int_0^{S_\text{max}}\phi_i(S)\frac{\sigma^2S^2}{2} \
      \frac{d^2V^{n}(S)}{dS^2}
\end{align*}

    There are two integrals here that are more or less the same, the differences being a slightly different coefficient in front of the integral and a different time step index on $V$. Therefore, we will work out this integral in the general case and account for the differences at the end. So, consider the general integral, which we will solve using integration by parts:

\begin{align*}
     &\int_{0}^{S_\text{max}} \phi_i(S)\frac{\sigma^2S^2}{2}
         \frac{d^2V^n(S)}{dS^2}dS \\
     &= \phi_i(S)\frac{1}{2}\sigma^2S^2\frac{dV^n(S)}{dS}\Bigg|_0^{S_{max}} - \
     \int_0^{S_\text{max}} \frac{d}{dS}\left[\phi_i(S)\frac{1}{2}\sigma^2S^2\right]
        \frac{dV^n(S)}{dS}dS \\
     &= -\sigma^2\textbf{B}V^n - \frac{1}{2}\sigma^2\textbf{D}V^n, \quad\quad \
     \textbf{B}_{i,j} = \left(\phi_i(S)S, \frac{d\phi_j(S)}{dS}\right),\
     \textbf{D}_{i,j} = \left(\frac{d\phi_i(S)}{dS}S^2,\frac{d\phi_j(S)}{dS}\right)
\end{align*}
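    Neither $\textbf{B}$ nor $\textbf{D}$ is a matrix deal.II can generate for us automatically, but both follow the standard assembly pattern. The following is a minimal sketch of how they could be built, assuming a 1d mesh in $S$, scalar elements, and sparsity patterns that have already been set up; the function name and surrounding setup are illustrative rather than the tutorial program's literal code:

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/sparse_matrix.h>

using namespace dealii;

// Hypothetical helper: assemble B_{ij} = (phi_i S, dphi_j/dS) and
// D_{ij} = (dphi_i/dS S^2, dphi_j/dS) on a 1d mesh in the variable S.
void assemble_B_and_D(const DoFHandler<1>  &dof_handler,
                      SparseMatrix<double> &B,
                      SparseMatrix<double> &D)
{
  const FiniteElement<1> &fe = dof_handler.get_fe();
  const QGauss<1>         quadrature(fe.degree + 1);
  FEValues<1>             fe_values(fe,
                                    quadrature,
                                    update_values | update_gradients |
                                      update_quadrature_points |
                                      update_JxW_values);

  const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
  FullMatrix<double> cell_B(dofs_per_cell, dofs_per_cell);
  FullMatrix<double> cell_D(dofs_per_cell, dofs_per_cell);
  std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      cell_B = 0.;
      cell_D = 0.;

      for (unsigned int q = 0; q < quadrature.size(); ++q)
        {
          // The coordinate of this quadrature point is the stock price S:
          const double S = fe_values.quadrature_point(q)[0];
          for (unsigned int i = 0; i < dofs_per_cell; ++i)
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
              {
                // (phi_i S, dphi_j/dS)
                cell_B(i, j) += fe_values.shape_value(i, q) * S *
                                fe_values.shape_grad(j, q)[0] *
                                fe_values.JxW(q);
                // (dphi_i/dS S^2, dphi_j/dS)
                cell_D(i, j) += fe_values.shape_grad(i, q)[0] * S * S *
                                fe_values.shape_grad(j, q)[0] *
                                fe_values.JxW(q);
              }
        }

      cell->get_dof_indices(local_dof_indices);
      B.add(local_dof_indices, cell_B);
      D.add(local_dof_indices, cell_D);
    }
}

    The mass matrix $\textbf{M}$ appearing in (1) and (4) could be assembled the same way, or obtained from MatrixCreator::create_mass_matrix(); the matrix $\textbf{A}$ defined further below follows the same pattern, with $S\,\frac{d\phi_i}{dS}$ tested against $\phi_j$ as the integrand.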


    So, after adding in the constants and exchanging $V^n$ for $V^{n-1}$ where applicable, we arrive at the following for (2):

\begin{align*}
     &k_n\int_0^{S_\text{max}}\phi_i(S)\left[\frac{\sigma^2S^2}{2}
         \left[(1-\theta)\
     \frac{d^2V^{n-1}(S)}{dS^2} + \
     \theta \frac{d^2V^{n}(S)}{dS^2}\right]\right]dS \\
     &= k_n\left[-(1-\theta)\sigma^2\textbf{B}V^{n-1} \
      -(1-\theta)\frac{1}{2}\sigma^2\textbf{D}V^{n-1} \
     -\theta\sigma^2\textbf{B}V^{n}
      -\theta\frac{1}{2}\sigma^2\textbf{D}V^{n}\right]
\end{align*}


    But, because the matrix $\textbf{B}$ involves an advective term, we will choose $\theta=0$ there – in other words, we use an explicit Euler method to treat advection. Conversely, since the matrix $\textbf{D}$ involves the diffusive term, we will choose $\theta=1/2$ there – i.e., we treat diffusion using the second order Crank-Nicolson method.


    So, we arrive at the following:

\begin{align*}
     k_n\left[-\frac{1}{4}\sigma^2\textbf{D}V^{n-1} \
     -\frac{1}{4}\sigma^2\textbf{D}V^n \
     - \sigma^2\textbf{B}V^{n-1}\right]
\end{align*}

    Now, to handle (3). For this, we again proceed by considering the general case as above.

\begin{align*}
     &\int_{0}^{S_\text{max}} \phi_i(S)rS\frac{dV^n}{dS}dS \\
     &= \phi_i(S)rSV^n\Bigg|_0^{S_\text{max}} - \int_0^{S_\text{max}}
         \left[r\phi_i(S) \
      + rS\frac{d\phi_i(S)}{dS}\right]V^n(S)dS \\
     &= -r\textbf{M}V^n -r\textbf{A}V^n, \quad\quad\
     \textbf{M}_{i,j} = \left(\phi_i(S), \phi_j(S)\right),\
     \textbf{A}_{i,j} = \left(S\frac{d\phi_i(S)}{dS}, \phi_j(S)\right)
\end{align*}


    So, again after adding in the constants and exchanging $V^n$ for $V^{n-1}$ where applicable, we arrive at the following for (3):

\begin{align*}
     &k_n\int_0^{S_\text{max}}\phi_i(S)\left[rS\left[(1-\theta)
         \frac{dV^{n-1}(S)}{dS} +\
      \theta\frac{dV^{n}(S)}{dS}\right]\right]dS \\
     &= k_n\left[-(1-\theta)r\textbf{M}V^{n-1} -(1-\theta)r\textbf{A}V^{n-1}\
     -\theta r\textbf{M}V^n -\theta r\textbf{A}V^n\right]
\end{align*}


    Just as before, we will use $\theta=0$ for the matrix $\textbf{A}$ and $\theta=\frac{1}{2}$ for the matrix $\textbf{M}$. So, we arrive at the following for (3):

\begin{align*}
     k_n\left[-\frac{1}{2}r\textbf{M}V^{n-1} - \frac{1}{2}r\textbf{M}V^n \
     -r\textbf{A}V^{n-1}\right]
\end{align*}

    Now, putting everything together, we obtain the following discrete form for the Black-Scholes Equation:

\begin{align*}
     0&= \\
     &-\textbf{M}V^n+\textbf{M}V^{n-1} \\
     & +k_n\left[-\frac{1}{4}\sigma^2\textbf{D}V^{n-1} \
     -\frac{1}{4}\sigma^2\textbf{D}V^n
     - \sigma^2\textbf{B}V^{n-1}\right] \\
     &+k_n\left[-\frac{1}{2}r\textbf{M}V^{n-1} - \frac{1}{2}r\textbf{M}V^n \
     -r\textbf{A}V^{n-1}\right]
\end{align*}
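    Since $V^{n-1}$ is known, this is, after collecting the implicit terms on the left-hand side, a linear system for $V^n$:

\begin{align*}
\left[\textbf{M} + \frac{k_n}{4}\sigma^2\textbf{D}
  + \frac{k_n}{2}r\textbf{M}\right]V^n
= \left[\textbf{M} - \frac{k_n}{4}\sigma^2\textbf{D} - k_n\sigma^2\textbf{B}
  - \frac{k_n}{2}r\textbf{M} - k_n r\textbf{A}\right]V^{n-1},
\end{align*}

    which is the system that has to be solved once per time step.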
/usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html	2024-01-30 03:04:56.012911075 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html	2024-01-30 03:04:56.012911075 +0000

    Introduction


    Topology Optimization of Elastic Media is a technique used to optimize a structure that is bearing some load. Ideally, we would like to minimize the maximum stress placed on a structure by selecting a region $E$ where material is placed. In other words,

\[
   \text{minimize}\| \boldsymbol{\sigma} (\mathbf{u}) \|_\infty
\]

\[
   \text{subject to } |E|\leq V_{\max},
\]

\[
   \text{and } \nabla \cdot \boldsymbol{\sigma} + \mathbf{F} = \mathbf{0}.
\]


    Here, $\boldsymbol{\sigma} = \mathbf{C} : \boldsymbol{\varepsilon}(\mathbf{u})$ is the stress within the body that is caused by the external forces $\mathbf F$, where we have for simplicity assumed that the material is linear-elastic and so $\mathbf{C}$ is the stress-strain tensor and $\boldsymbol{\varepsilon}(\mathbf{u})=\frac{1}{2} (\nabla \mathbf{u} + (\nabla\mathbf{u})^T)$ is the small-deformation strain as a function of the displacement $\mathbf{u}$ – see step-8 and step-17 for more on linear elasticity. In the formulation above, $V_\text{max}$ is the maximal amount of material we are willing to provide to build the object. The last of the constraints is the partial differential equation that relates stress $\boldsymbol{\sigma}$ and forces $\mathbf F$ and is simply the steady-state force balance.


    That said, the infinity norm above creates a problem: As a function of location of material, this objective function is necessarily not differentiable, making prospects of optimization rather bleak. So instead, a common approach in topology optimization is to find an approximate solution by optimizing a related problem: We would like to minimize the strain energy. This is a measure of the potential energy stored in an object due to its deformation, but also works as a measure of total deformation over the structure.

\[
   \text{minimize  } \int_E \frac{1}{2}\boldsymbol{\sigma} : \boldsymbol{\varepsilon} dV
\]

\[
   \text{subject to } \|E\| \leq V_{\max}
\]

\[
   \text{and } \nabla \cdot \boldsymbol{\sigma} + \mathbf{F} = \mathbf{0}
\]

    The value of the objective function is calculated using a finite element method, where the solution is the displacement field. This is placed inside of a nonlinear solver loop that solves for a vector denoting the placement of material.

    Solid Isotropic Material with Penalization


    In actual practice, we can only build objects in which the material is either present, or not present, at any given point – i.e., we would have an indicator function $\rho_E(\mathbf{x})\in \{0,1\}$ that describes the material-filled region and that we want to find through the optimization problem. In this case, the optimization problem becomes combinatorial, and very expensive to solve. Instead, we use an approach called Solid Isotropic Material with Penalization, or SIMP. [Bendse2004]


    The SIMP method is based on an idea of allowing the material to exist in a location with a density $\rho$ between 0 and 1. A density of 0 suggests the material is not there, and it is not a part of the structure, while a density of 1 suggests the material is present. Values between 0 and 1 do not reflect a design we can create in the real-world, but allow us to turn the combinatorial problem into a continuous one. One then looks at density values $\rho$, with the constraint that $0 < \rho_{\min} \leq \rho \leq 1$. The minimum value $\rho_{\min}$, typically chosen to be around $10^{-3}$, avoids the possibility of having an infinite strain energy, but is small enough to provide accurate results.


    The straightforward application of the effect of this "density" on the elasticity of the media would be to simply multiply the stiffness tensor $\mathbf{C}_0$ of the medium by the given density, that is, $\mathbf{C} = \rho \mathbf{C}_0$. However, this approach often gives optimal solutions where density values are far from both 0 and 1. As one wants to find a real-world solution, meaning the material either is present or it is not, a penalty is applied to these in-between values. A simple and effective way to do this is to multiply the stiffness tensor by the density raised to some integer power penalty parameter $p$, so that $\mathbf{C} = \rho^p \mathbf{C}_0$. This makes density values farther away from 0 or 1 less effective. It has been shown that using $p=3$ is sufficiently high to create 'black-and-white' solutions: that is, one gets optimal solutions in which material is either present or not present at all points.

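    In code, the penalization itself is a one-liner once the unpenalized tensor is in hand. The following sketch, with an illustrative function name and constant Lamé parameters assumed, builds $\mathbf{C}_0$ from deal.II's tensor utilities and applies the power law:

#include <deal.II/base/symmetric_tensor.h>

#include <cmath>

using namespace dealii;

// Sketch: SIMP-penalized stiffness C(rho) = rho^p C_0 for a linear
// isotropic material. The function name is illustrative; lambda and mu
// are the Lame parameters, rho is assumed to lie in [rho_min, 1].
template <int dim>
SymmetricTensor<4, dim> simp_stiffness(const double lambda,
                                       const double mu,
                                       const double rho,
                                       const double p = 3.)
{
  // C_0 = lambda I x I + 2 mu II, the usual isotropic elasticity tensor:
  const SymmetricTensor<4, dim> C_0 =
    lambda * outer_product(unit_symmetric_tensor<dim>(),
                           unit_symmetric_tensor<dim>()) +
    2. * mu * identity_tensor<dim>();

  return std::pow(rho, p) * C_0;
}

    Applied to a strain tensor, this reproduces the formula $\boldsymbol{\sigma} = \rho^p (\lambda \text{tr}(\boldsymbol{\varepsilon}) \mathbf{I} + 2 \mu \boldsymbol{\varepsilon})$ used below.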

    More material should always provide a structure with a lower strain energy, and so the inequality constraint can be viewed as an equality where the total volume used is the maximum volume.

    Using this density idea also allows us to reframe the volume constraint on the optimization problem. Use of SIMP then turns the optimization problem into the following:

\[
   \text{minimize  } \int_\Omega \frac{1}{2}\boldsymbol{\sigma}(\rho) : \boldsymbol{\varepsilon}(\rho) d\Omega
\]

\[
   \text{subject to } \int_\Omega \rho(x) d\Omega= V_{\max},
\]

\[
   0<\rho_{\min}\leq \rho(x) \leq 1,
\]

\[
   \nabla \cdot \boldsymbol{\sigma}(\rho) + \mathbf{F} = 0 \quad \text{on } \Omega
\]


    The final constraint, the balance of linear momentum (which we will refer to as the elasticity equation), gives a method for finding $\boldsymbol{\sigma}$ and $\boldsymbol{\varepsilon}$ given the density $\rho$.


    Elasticity Equation

    The elasticity equation in the time independent limit reads

\[
   \nabla \cdot \boldsymbol{\sigma} + \mathbf{F} = \mathbf{0} .
\]

    In the situations we will care about, we will assume that the medium has a linear material response and in that case, we have that

\[
   \boldsymbol{\sigma} = \mathbf{C} : \boldsymbol{\varepsilon} = \rho^p \mathbf{C}_0 : \boldsymbol{\varepsilon}(\mathbf{u})
    = \rho^p \mathbf{C}_0 : \left[\frac{1}{2} (\nabla \mathbf{u} + (\nabla \mathbf{u})^T) \right] .
\]


    In everything we will do below, we will always consider the displacement field $\mathbf{u}$ as the only solution variable, rather than considering $\mathbf{u}$ and $\boldsymbol{\sigma}$ as solution variables (as is done in mixed formulations).


    Furthermore, we will make the assumption that the material is linear isotropic, in which case the stress-strain tensor can be expressed in terms of the Lamé parameters $\lambda,\mu$ such that

\begin{align}
   \boldsymbol{\sigma} &= \rho^p (\lambda \text{tr}(\boldsymbol{\varepsilon}) \mathbf{I} + 2 \mu \boldsymbol{\varepsilon}) , \\
   \sigma_{i,j} &= \rho^p (\lambda \varepsilon_{k,k} \delta_{i,j} + 2 \mu \varepsilon_{i,j}) .
\end{align}

    See step-8 for how this transformation works.

    Integrating the objective function by parts gives

\[
   \int_\Omega \boldsymbol{\sigma}(\rho) : (\nabla \mathbf{u} + (\nabla \mathbf{u})^T)  d\Omega+
   \int_\Omega (\nabla \cdot \boldsymbol{\sigma}(\rho)) \cdot \mathbf{u}  d\Omega=
   \int_{\partial \Omega} \mathbf{t} \cdot \mathbf{u} d\partial\Omega ,
\]

    into which the linear elasticity equation can then be substituted, giving

\[
   \int_\Omega \boldsymbol{\sigma}(\rho) : (\nabla \mathbf{u} + (\nabla \mathbf{u})^T) d\Omega =
   \int_\Omega \mathbf{F}\cdot \mathbf{u} d\Omega+
   \int_{\partial \Omega} \mathbf{t} \cdot \mathbf{u} d\partial\Omega .
\]

    Because we are assuming no body forces, this simplifies further to

\[
   \int_\Omega \boldsymbol{\sigma}(\rho) : (\nabla \mathbf{u} + (\nabla \mathbf{u})^T) d\Omega
   = \int_{\partial \Omega} \mathbf{t} \cdot \mathbf{u} d\partial\Omega,
\]

    which is the final form of the governing equation that we'll be considering from this point forward.

    Making the solution mesh-independent

    Typically, the solutions to topology optimization problems are mesh-dependent, and as such the problem is ill-posed. This is because fractal structures are often formed as the mesh is refined further. As the mesh gains resolution, the optimal solution typically gains smaller and smaller structures. There are a few competing workarounds to this issue, but the most popular for first order optimization is the sensitivity filter, while second order optimization methods tend to prefer use of a density filter.


    As the filters affect the gradient and Hessian of the strain energy (i.e., the objective function), the choice of filter has an effect on the solution of the problem. The density filter as part of a second order method works by introducing an unfiltered density, which we refer to as $\varrho$, and then requiring that the density be a convolution of the unfiltered density:

\[
   \rho = H(\varrho).
\]


    Here, $H$ is an operator so that $\rho(\mathbf{x})$ is some kind of average of the values of $\varrho$ in the area around $\mathbf{x}$ – i.e., it is a smoothed version of $\varrho$.


    This prevents checkerboarding; the radius of the filter allows the user to define an effective minimal beam width for the optimal structures we seek to find.
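    Purely as an illustration (the precise weights are an implementation choice rather than something fixed by the method), a convolution of this kind is often realized with a normalized cone kernel of radius $r$:

\[
  \rho(\mathbf{x}) = H(\varrho)(\mathbf{x})
  = \frac{\int_{\Omega} \max\{0,\, r-|\mathbf{x}-\mathbf{y}|\}\,\varrho(\mathbf{y})\, d\mathbf{y}}
         {\int_{\Omega} \max\{0,\, r-|\mathbf{x}-\mathbf{y}|\}\, d\mathbf{y}}.
\]

    Structures thinner than $r$ cannot survive this averaging, which is exactly the minimal-beam-width effect just described.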

    Checkerboarding occurring in an MBB Beam

    Complete Problem Formulation

    The minimization problem is now

\[
   \min_{\rho,\varrho,\mathbf{u}} \int_{\partial\Omega} \mathbf{u} \cdot \mathbf{t} d\partial\Omega
\]

\[
   \text{subject to   } \rho = H(\varrho)
\]

\[
   \int_\Omega \rho^p \left(\frac{\mu}{2}\,\boldsymbol{\varepsilon}(\mathbf{v}):
   \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \left( \nabla \cdot \mathbf{u}\, \nabla
   \cdot \mathbf{v} \right) \right) d\Omega = \int_{\partial \Omega} \mathbf{v} \cdot
   \mathbf{t}\, d\partial\Omega
\]

\[
   \int_\Omega \rho d\Omega= V
\]

\[
   0\leq \varrho \leq 1
\]


    The inequality constraints are dealt with by first introducing slack variables, and second using log barriers to ensure that we obtain an interior-point method. The penalty parameter is going to be $\alpha$, and the following slack variables are

/usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html	2024-01-30 03:04:56.060911475 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html	2024-01-30 03:04:56.060911475 +0000
      Note
      The material presented here is also discussed in video lecture 19. (All video lectures are also available here.)

      In this tutorial program we will want to solve the elastic equations. They are an extension to Laplace's equation with a vector-valued solution that describes the displacement in each space direction of a rigid body which is subject to a force. Of course, the force is also vector-valued, meaning that in each point it has a direction and an absolute value.

      One can write the elasticity equations in a number of ways. The one that shows the symmetry with the Laplace equation in the most obvious way is to write it as

\[
   -
   \text{div}\,
   ({\mathbf C} \nabla \mathbf{u})
   =
   \mathbf f,
\]


      where $\mathbf u$ is the vector-valued displacement at each point, $\mathbf f$ the force, and ${\mathbf C}$ is a rank-4 tensor (i.e., it has four indices) that encodes the stress-strain relationship – in essence, it represents the "spring constant" in Hooke's law that relates the displacement to the forces. ${\mathbf C}$ will, in many cases, depend on $\mathbf x$ if the body whose deformation we want to simulate is composed of different materials.


      While the form of the equations above is correct, it is not the way they are usually derived. In truth, the gradient of the displacement $\nabla\mathbf u$ (a matrix) has no physical meaning whereas its symmetrized version,

\[
 \varepsilon(\mathbf u)_{kl} =\frac{1}{2}(\partial_k u_l + \partial_l u_k),
\]


      does and is typically called the "strain". (Here and in the following, $\partial_k=\frac{\partial}{\partial x_k}$. We will also use the Einstein summation convention that whenever the same index appears twice in an equation, summation over this index is implied; we will, however, not distinguish between upper and lower indices.) With this definition of the strain, the elasticity equations then read as

\[
   -
   \text{div}\,
   ({\mathbf C} \varepsilon(\mathbf u))
   =
   \mathbf f,
\]


      which you can think of as the more natural generalization of the Laplace equation to vector-valued problems. (The form shown first is equivalent to this form because the tensor ${\mathbf C}$ has certain symmetries, namely that $C_{ijkl}=C_{ijlk}$, and consequently ${\mathbf C} \varepsilon(\mathbf u)_{kl} = {\mathbf C} \nabla\mathbf u$.)


      One can of course alternatively write these equations in component form:

\[
   -
   \partial_j (c_{ijkl} \varepsilon_{kl})
   =
   f_i,
   \qquad
   i=1\ldots d.
\]


      In many cases, one knows that the material under consideration is isotropic, in which case by introduction of the two coefficients $\lambda$ and $\mu$ the coefficient tensor reduces to

\[
   c_{ijkl}
   =
   \lambda \delta_{ij} \delta_{kl} +
   \mu (\delta_{ik} \delta_{jl} + \delta_{il} \delta_{jk}).
\]

      The elastic equations can then be rewritten in a much simpler form:

\[
    -
    \nabla \lambda (\nabla\cdot {\mathbf u})
    -
    (\nabla \cdot \mu \nabla) {\mathbf u}
    -
    \nabla\cdot \mu (\nabla {\mathbf u})^T
    =
    {\mathbf f},
\]

      and the respective bilinear form is then

\[
   a({\mathbf u}, {\mathbf v}) =
   \left(
     \lambda \nabla\cdot {\mathbf u}, \nabla\cdot {\mathbf v}
   \right)_\Omega
   +
   \left(
     \mu \partial_k u_l, \partial_k v_l
   \right)_\Omega
   +
   \left(
     \mu \partial_k u_l, \partial_l v_k
   \right)_\Omega,
\]

      or also writing the first term as a sum over components:

\[
   a({\mathbf u}, {\mathbf v}) =
   \sum_{k,l}
   \left(
     \lambda \partial_l u_l, \partial_k v_k
   \right)_\Omega
   +
   \sum_{k,l}
   \left(
     \mu \partial_k u_l, \partial_k v_l
   \right)_\Omega
   +
   \sum_{k,l}
   \left(
     \mu \partial_k u_l, \partial_l v_k
   \right)_\Omega.
\]

      Note
      As written, the equations above are generally considered to be the right description for the displacement of three-dimensional objects if the displacement is small and we can assume that Hooke's law is valid. In that case, the indices $i,j,k,l$ above all run over the set $\{1,2,3\}$ (or, in the C++ source, over $\{0,1,2\}$). However, as is, the program runs in 2d, and while the equations above also make mathematical sense in that case, they would only describe a truly two-dimensional solid. In particular, they are not the appropriate description of an $x-y$ cross-section of a body infinite in the $z$ direction; this is in contrast to many other two-dimensional equations that can be obtained by assuming that the body has infinite extent in $z$-direction and that the solution function does not depend on the $z$ coordinate. On the other hand, there are equations for two-dimensional models of elasticity; see for example the Wikipedia article on plane strain, antiplane shear and plane stress.

      But let's get back to the original problem. How do we assemble the matrix for such an equation? A very long answer with a number of different alternatives is given in the documentation of the Handling vector valued problems module. Historically, the solution shown below was the only one available in the early years of the library. It turns out to also be the fastest. On the other hand, if a few per cent of compute time do not matter, there are simpler and probably more intuitive ways to assemble the linear system than the one discussed below but that weren't available until several years after this tutorial program was first written; if you are interested in them, take a look at the Handling vector valued problems module.
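      To give an impression of what those simpler alternatives look like, here is a sketch of the cell-matrix loop written with FEValues extractors, the mechanism the vector-valued-problems framework provides. It is an illustration under the usual assumptions (an FEValues object already reinit()ed with update_gradients | update_JxW_values, constant $\lambda$ and $\mu$), not the code this program actually uses:

#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_values_extractors.h>
#include <deal.II/lac/full_matrix.h>

using namespace dealii;

// Sketch: the bilinear form (lambda div u, div v) + 2 (mu eps(u), eps(v))
// evaluated with FEValues extractors for a vector-valued element whose
// displacement components start at component 0.
template <int dim>
void assemble_cell_matrix(const FEValues<dim> &fe_values,
                          const double         lambda,
                          const double         mu,
                          FullMatrix<double>  &cell_matrix)
{
  const FEValuesExtractors::Vector displacements(0);

  for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
    for (unsigned int i = 0; i < fe_values.dofs_per_cell; ++i)
      for (unsigned int j = 0; j < fe_values.dofs_per_cell; ++j)
        cell_matrix(i, j) +=
          (lambda * fe_values[displacements].divergence(i, q) *
                    fe_values[displacements].divergence(j, q) +
           2. * mu * fe_values[displacements].symmetric_gradient(i, q) *
                     fe_values[displacements].symmetric_gradient(j, q)) *
          fe_values.JxW(q);
}

      This works because the three terms of the bilinear form collapse into a divergence-divergence product plus a symmetric-gradient product: $(\mu \partial_k u_l, \partial_k v_l) + (\mu \partial_k u_l, \partial_l v_k) = 2(\mu\,\varepsilon({\mathbf u}), \varepsilon({\mathbf v}))$.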


      Let us go back to the question of how to assemble the linear system. The first thing we need is some knowledge about how the shape functions work in the case of vector-valued finite elements. Basically, this comes down to the following: let $n$ be the number of shape functions for the scalar finite element of which we build the vector element (for example, we will use bilinear functions for each component of the vector-valued finite element, so the scalar finite element is the FE_Q(1) element which we have used in previous examples already, and $n=4$ in two space dimensions). Further, let $N$ be the number of shape functions for the vector element; in two space dimensions, we need $n$ shape functions for each component of the vector, so $N=2n$. Then, the $i$th shape function of the vector element has the form

\[
   \Phi_i({\mathbf x}) = \varphi_{\text{base}(i)}({\mathbf x})\ {\mathbf e}_{\text{comp}(i)},
\]


      where $e_l$ is the $l$th unit vector, $\text{comp}(i)$ is the function that tells us which component of $\Phi_i$ is the one that is nonzero (for each vector shape function, only one component is nonzero, and all others are zero). $\varphi_{\text{base}(i)}(x)$ describes the space dependence of the shape function, which is taken to be the $\text{base}(i)$-th shape function of the scalar element. Of course, while $i$ is in the range $0,\ldots,N-1$, the functions $\text{comp}(i)$ and $\text{base}(i)$ have the ranges $0,1$ (in 2D) and $0,\ldots,n-1$, respectively.


      For example (though this sequence of shape functions is not guaranteed, and you should not rely on it), the following layout could be used by the library:

\begin{eqnarray*}
   \Phi_0({\mathbf x}) &=&
   \left(\begin{array}{c}
     \varphi_0({\mathbf x}) \\ 0
   \end{array}\right),
   \\
   \Phi_1({\mathbf x}) &=&
   \left(\begin{array}{c}
     0 \\ \varphi_0({\mathbf x})
   \end{array}\right),
   \\
   \Phi_2({\mathbf x}) &=&
   \left(\begin{array}{c}
     \varphi_1({\mathbf x}) \\ 0
   \end{array}\right),
   \\
   \Phi_3({\mathbf x}) &=&
   \left(\begin{array}{c}
     0 \\ \varphi_1({\mathbf x})
   \end{array}\right),
   \ldots
\end{eqnarray*}

      where here

\[
   \text{comp}(0)=0, \quad  \text{comp}(1)=1, \quad  \text{comp}(2)=0, \quad  \text{comp}(3)=1, \quad  \ldots
\]

\[
   \text{base}(0)=0, \quad  \text{base}(1)=0, \quad  \text{base}(2)=1, \quad  \text{base}(3)=1, \quad  \ldots
\]
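      If one ever does need these numbers, the element can simply be asked for them: FESystem reports both via system_to_component_index(). A small self-contained check, using the same $n=4$, $N=2n$ element as in the example above:

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>

#include <iostream>

using namespace dealii;

int main()
{
  // dim = 2 copies of the scalar bilinear element, i.e., n = 4 scalar
  // shape functions and N = 2n = 8 vector shape functions per cell:
  const FESystem<2> fe(FE_Q<2>(1), 2);

  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    std::cout << "i=" << i
              << "  comp(i)=" << fe.system_to_component_index(i).first
              << "  base(i)=" << fe.system_to_component_index(i).second
              << '\n';
}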


      In all but very rare cases, you will not need to know which shape function $\varphi_{\text{base}(i)}$ of the scalar element belongs to a shape function $\Phi_i$ of the vector element. Let us therefore define

\[
   \phi_i = \varphi_{\text{base}(i)}
\]

      by which we can write the vector shape function as

\[
   \Phi_i({\mathbf x}) = \phi_{i}({\mathbf x})\ {\mathbf e}_{\text{comp}(i)}.
\]


      You can now safely forget about the function $\text{base}(i)$, at least for the rest of this example program.


      Now using these vector shape functions, we can write the discrete finite element solution as

\[
   {\mathbf u}_h({\mathbf x}) =
   \sum_i \Phi_i({\mathbf x})\ U_i
\]


      with scalar coefficients $U_i$. If we define an analog function ${\mathbf v}_h$ as test function, we can write the discrete problem as follows: Find coefficients $U_i$ such that

\[
   a({\mathbf u}_h, {\mathbf v}_h) = ({\mathbf f}, {\mathbf v}_h)
   \qquad
   \forall {\mathbf v}_h.
\]


      If we insert the definition of the bilinear form and the representation of ${\mathbf u}_h$ and ${\mathbf v}_h$ into this formula:

\begin{eqnarray*}
   \sum_{i,j}
     U_i V_j
   \sum_{k,l}
   \left(
     \lambda \partial_l (\Phi_i)_l, \partial_k (\Phi_j)_k
   \right)_\Omega
   +
   \left(
     \mu \partial_k (\Phi_i)_l, \partial_k (\Phi_j)_l
   \right)_\Omega
   +
   \left(
     \mu \partial_k (\Phi_i)_l, \partial_l (\Phi_j)_k
   \right)_\Omega
   \\
   =
   \sum_j V_j
   \sum_l
   \left(
     f_l,
     (\Phi_j)_l
   \right)_\Omega.
\end{eqnarray*}


      We note that here and in the following, the indices $k,l$ run over spatial directions, i.e. $0\le k,l < d$, and that indices $i,j$ run over degrees of freedom.


      The local stiffness matrix on cell $K$ therefore has the following entries:

\[
   A^K_{ij}
   =
   \sum_{k,l}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html	2024-01-30 03:04:56.128912042 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html	2024-01-30 03:04:56.128912042 +0000

      Time-Harmonic Maxwell's Equations with interface conditions

      We start the discussion with a short derivation of the governing equations and some literature references.

      Derivation of time-harmonic Maxwell's equations


      In two ( $d=2$) or three ( $d=3$) spatial dimensions, the time evolution of an electromagnetic wave $(\mathbf{E},\mathbf{H})$ that consists of an electric field component $\mathbf{E}(t,\mathbf{x})\;:\;\mathbb{R}\times\mathbb{R}^d\to\mathbb{R}^d$ and a magnetic field component $\mathbf{H}(t,\mathbf{x})\;:\;\mathbb{R}\times\mathbb{R}^d\to\mathbb{R}^d$ is described by Maxwell's equations [Schwartz1972], [Monk2003] :

\begin{align*}
   \frac{\partial}{\partial t} \mathbf{H} + \nabla \times \mathbf{E} &= -\mathbf{M}_a,
   \\
   \nabla \cdot \mathbf{H} &= \rho_m,
   \\
   \frac{\partial}{\partial t} (\varepsilon\mathbf{E}) - \nabla\times(\mu^{-1}\mathbf{H}) &= - \mathbf{J}_a,
   \\
   \nabla\cdot(\varepsilon\mathbf{E}) &= \rho.
\end{align*}


      Here, $\nabla\times$ is the curl operator, $\nabla\cdot$ is the divergence operator, $\varepsilon$ is the electric permittivity, $\mu$ is the magnetic permeability, $\rho$ is the electric charge density, and $\rho_m$ is a corresponding (hypothetical) magnetic monopole density. $\mathbf{J}_a$ and $\mathbf{M}_a$ are the electric and magnetic flux densities which are related to their respective charge densities by the conservation equations [Schwartz1972]

\[
 \frac{\partial}{\partial t} \rho + \nabla\cdot\mathbf{J}_a = 0
 \text{ and }
 \frac{\partial}{\partial t} \rho_m + \nabla\cdot\mathbf{M}_a = 0.
\]


      We now make the important assumption that the material parameters $\varepsilon$ and $\mu$ are time-independent and that the fields $\mathbf{E}$ and $\mathbf{H}$, the fluxes $\mathbf{M}_a$ and $\mathbf{J}_a$, as well as the densities $\rho$ and $\rho_m$ are all time-harmonic, i.e., their time evolution is completely described by

\[
   \mathbf{F}(\mathbf{x},t) = \text{Re}\{e^{-i\omega
   t}\tilde{\mathbf{F}}(\mathbf{x})\},
\]


      in which $\omega$ is the temporal angular frequency and $\tilde{\mathbf{F}}(\mathbf{x})$ is a corresponding complex-valued vector field (or density). Inserting this ansatz into Maxwell's equations, substituting the charge conservation equations and some minor algebra then yields the so-called time-harmonic Maxwell's equations:

\begin{align*}
   -i\omega \tilde{\mathbf{H}} + \nabla \times \tilde{\mathbf{E}} &=
   -\tilde{\mathbf{M}}_a,
   \\
   \nabla \cdot \tilde{\mathbf{H}} &= \frac{1}{i\omega}\nabla\cdot\tilde{\mathbf{M}}_a,
   \\
   -i\omega\varepsilon\tilde{\mathbf{E}} - \nabla\times(\mu^{-1}\tilde{\mathbf{H}}) &=
   -\tilde{\mathbf{J}}_a,
   \\
   \nabla\cdot(\varepsilon\tilde{\mathbf{E}}) &=
   \frac{1}{i\omega}\nabla\cdot\tilde{\mathbf{J}}_a.
\end{align*}
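      The mechanics of this substitution deserve a one-line check: differentiating the ansatz in time only brings down a constant factor,

\[
  \frac{\partial}{\partial t} \mathbf{F}(\mathbf{x},t)
  = \text{Re}\{-i\omega\, e^{-i\omega t}\tilde{\mathbf{F}}(\mathbf{x})\},
\]

      which is how the time derivatives in the first and third equations turned into the $-i\omega$ prefactors above.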


      For the sake of better readability we will now drop the tilde and simply write $\mathbf{E}(\mathbf{x})$, $\mathbf{H}(\mathbf{x})$, etc., when referring to the time-harmonic fields.


      Jump conditions on lower dimensional interfaces


      Graphene is a two-dimensional carbon allotrope with a single atom layer that is arranged in a honeycomb lattice [Geim2004]. Due to its atomic thickness it is an example of a so-called 2D material: Compared to the other spatial dimensions (where graphene samples can reach up to several centimeters) the atomic thickness of graphene is typically around 2.5 ångstrom ( $2.5\times10^{-10}\text{m}$). We will thus model graphene as a lower-dimensional interface $\Sigma$ embedded into the computational domain $\Omega\subset\mathbb{R}^d$. More precisely, $\Sigma$ is a two-dimensional sheet in three spatial dimensions, or a one-dimensional line in two spatial dimensions. The special electronic structure of graphene gives rise to a current density on the lower-dimensional interface that is modeled with an effective surface conductivity $\sigma^\Sigma$ obeying Ohm's Law:

\[
   \mathbf{J}^\Sigma=\sigma^\Sigma\,\mathbf{E}_T
\]


      in which $\mathbf{J}^\Sigma$ is the surface current density, $\mathbf{E}_T$ denotes the tangential part of the electric field $\mathbf{E}$, and $\sigma^\Sigma$ is an appropriately chosen surface conductivity that will be discussed in more detail below. The surface current density gives rise to a jump condition on $\Sigma$ in the tangential component of the magnetic field. This is best seen by visualizing Ampère's law:


      Visualization of Ohm's law and Ampère's law leading to a jump condition over the interface


      and then taking the limit of the upper and lower part of the line integral approaching the sheet. In contrast, the tangential part of the electric field is continuous. By fixing a unit normal $\mathbf{\nu}$ on the hypersurface $\Sigma$ both jump conditions are

\begin{align*}
 \mathbf{\nu} \times \left[(\mu^{-1}\mathbf{H})^+ - (\mu^{-1}\mathbf{H})^-\right]|_{\Sigma}
 &= \sigma^{\Sigma}\left[(\mathbf{\nu}\times \mathbf{E}\times \mathbf{\nu})\right]|_{\Sigma},
 \\
 \mathbf{\nu} \times \left[\mathbf{E}^+ - \mathbf{E}^-\right]|_{\Sigma} &= 0.
\end{align*}


      The notation $\mathbf{F}^\pm$ indicates the limit values of the field when approaching the interface from above or below the interface: $\mathbf{F}^\pm(\mathbf{x})=\lim_{\delta\to0,\delta>0}\mathbf{F}(\mathbf{x}\pm\delta\mathbf{\nu})$.


      Rescaling

      We will be using a rescaled version of the Maxwell's equations described above. The rescaling has the following key differences:

      1. Every length is rescaled by the free-space wavelength $2\pi k^{-1}
\dealcoloneq 2\pi(\omega\sqrt{\varepsilon_0\mu_0})^{-1}$, in which $\varepsilon_0$ and $\mu_0$ denote the vacuum dielectric permittivity and magnetic permeability, respectively.
      2. $\mathbf{E}$, $\mathbf{H}$, $\mathbf{J}_a$, $\mathbf{M}_a$ are all rescaled by the typical electric current strength $J_0$, i.e., the strength of the prescribed dipole source at location $a$ in the $e_i$ direction in Cartesian coordinates (here, $\delta$ is the Dirac delta operator).

\[
 \mathbf{J}_a = J_0 \mathbf{e}_i\delta(x-a)
\]


      Accordingly, our electric permittivity and magnetic permeability are rescaled by $\varepsilon_0$ and $\mu_0$ as

\[
 \mu_r = \frac{1}{\mu_0}\mu
 \text{ and }
 \varepsilon_r = \frac{1}{\varepsilon_0}\varepsilon.
\]


      We use the free-space wave number $k_0 = \omega\sqrt{\varepsilon_0\mu_0}$ and the dipole strength $J_0$ to arrive at the following rescaling of the vector fields and coordinates:

 \begin{align*}
 \hat{x} = k_0x, &\qquad
 \hat{\nabla} = \frac{1}{k_0}\nabla,\\
@@ -238,15 +238,15 @@
 \hat{\mathbf{J}}_a = \frac{1}{J_0}\mathbf{J}_a,&\qquad
 \hat{\mathbf{M}}_a = \frac{k_0}{\omega\mu_0 J_0}\mathbf{M}_a.
 \end{align*}

      Finally, the interface conductivity is rescaled as

\[
 \sigma^{\Sigma}_r = \sqrt{\frac{\mu_0}{\varepsilon_0}}\sigma^{\Sigma}.
\]

      Accordingly, our rescaled equations are

\begin{align*}
   -i\mu_r \hat{\mathbf{H}} + \hat{\nabla} \times \hat{\mathbf{E}}
   &= -\hat{\mathbf{M}}_a,
   \\
@@ -258,19 +258,19 @@
   \\
   \nabla\cdot(\varepsilon\mathbf{E}) &= \frac{1}{i\omega}\hat{\nabla}
   \cdot\hat{\mathbf{J}}_a.
\end{align*}

      We will omit the hat in further discussion for ease of notation.

      Variational Statement


      Let $\Omega \subset \mathbb{R}^n$, $(n = 2,3)$ be a simply connected and bounded domain with Lipschitz-continuous and piecewise smooth boundary, $\partial\Omega$. Let $\Sigma$ be an oriented, Lipschitz-continuous, piecewise smooth hypersurface. Fix a normal field $\nu$ on $\Sigma$ and let $n$ denote the outer normal vector on $\partial\Omega$.


      In order to arrive at the variational form, we will substitute for $\mathbf{H}$ in the first equation and obtain

\[
 \nabla \times (\mu_r^{-1}\nabla\times\mathbf{E}) - \varepsilon_r \mathbf{E}
 = i\mathbf{J}_a - \nabla\times (\mu_r^{-1}\mathbf{M}_a).
\]


      Now, consider a smooth test function $\varphi$ with complex conjugate $\bar{\varphi}$. Multiply both sides of the above equation by $\bar{\varphi}$ and integrate by parts in $\Omega\backslash\Sigma$.

\[
 \int_\Omega (\mu_r^{-1}\nabla\times\mathbf{E})\cdot (\nabla\times\bar{\varphi})\;\text{d}x
 - \int_\Omega \varepsilon_r\mathbf{E} \cdot \bar{\varphi}\;\text{d}x
 - \int_\Sigma [\nu \times (\mu_r^{-1}\nabla\times\mathbf{E} +
 \mu^{-1}\mathbf{M}_a)]_{\Sigma}\cdot\bar{\varphi}_T\;\text{d}o_x \\
 - \int_{\partial\Omega} (\nu \times (\mu_r^{-1}\nabla\times\mathbf{E} +
 \mu^{-1}\mathbf{M}_a)) \cdot \bar{\varphi}_T\;\text{d}o_x =
 i\int_\Omega \mathbf{J}_a \cdot \bar{\varphi}\;\text{d}x
 - \int_\Omega \mu_r^{-1}\mathbf{M}_a \cdot (\nabla \times \bar{\varphi})\;\text{d}x.
\]


      We use the subscript $T$ to denote the tangential part of the given vector and $[\cdot]_{\Sigma}$ to denote a jump over $\Sigma$, i.e.,

\[
   \mathbf{F}_T = (\mathbf{\nu}\times \mathbf{F})\times\mathbf{\nu}
   \text{ and }
   [\mathbf{F}]_{\Sigma}(\mathbf{x}) = \lim\limits_{s\searrow 0}(\mathbf{F}(\mathbf{x}+s\mathbf{\nu})-\mathbf{F}(\mathbf{x}-s\mathbf{\nu}))
\]


      for $\mathbf{x}\in \Sigma$.


      For the computational domain $\Omega$, we introduce the absorbing boundary condition at $\partial\Omega$, which is obtained by using a first-order approximation of the Silver-Müller radiation condition, truncated at $\partial\Omega$ [Monk2003].

\[
 \nu\times\mathbf{H}+\sqrt{\mu_r^{-1}\varepsilon_r}\mathbf{E}=0\qquad x\in\partial\Omega
\]


      We assume that $\mu_r^{-1}$ and $\varepsilon$ have well-defined square roots. In our numerical computation, we combine the above absorbing boundary condition with a PML.


      The jump condition can be expressed as a weak discontinuity as follows:

\[
 [\nu \times (\mu_r^{-1}\nabla\times\mathbf{E} + \mu^{-1}\mathbf{M}_a)]_{\Sigma}
 = i\sigma_r^{\Sigma}\mathbf{E}_T,\qquad \text{on }\Sigma
\]
/usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html	2024-01-30 03:04:56.224912842 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html	2024-01-30 03:04:56.224912842 +0000

      Introduction

      Problem Statement

      In this example, we consider the local discontinuous Galerkin (LDG) method for approximating the solution to the bi-Laplacian problem,

\begin{align*}
 \Delta^2 u & = f \quad \mbox{in } \Omega, \\
 \nabla u & = \mathbf{0} \quad \mbox{on } \partial\Omega, \\
 u & = 0 \quad \mbox{on } \partial\Omega,
\end{align*}


      where $\Omega\subset\mathbb{R}^d$ $(d=2,3)$ is an open bounded Lipschitz domain and $f\in L^2(\Omega)$. This is the same problem we have already considered in step-47, but we will take here a different approach towards solving it: Rather than using continuous finite elements and the interior penalty method, we consider discontinuous finite elements and the local discontinuous Galerkin method defined using lifting operators.


      The weak formulation of this problem reads as follows: find $u\in H_0^2(\Omega)$ such that

\[
 \int_{\Omega}D^2u:D^2v = \int_{\Omega}fv \qquad \forall \, v\in H_0^2(\Omega),
\]


      where $D^2v$ denotes the Hessian of $v$ and $H_0^2(\Omega)\dealcoloneq\{v\in H^2(\Omega): \,\, v=0 \mbox{ and } \nabla v=\mathbf{0} \,\, \mbox{ on } \partial\Omega\}$. Using so-called lifting operators as well as the Nitsche approach to impose the homogeneous Dirichlet boundary conditions, the LDG approximation of this problem consists of replacing the Hessians by discrete Hessians (see below) and adding penalty terms involving properly scaled jump terms. The versatility of the method described below makes it of particular interest for nonlinear problems or problems with intricate weak formulations for which the design of discrete algorithms is challenging.


      Discretization

      Finite element spaces

      For $h>0$, let $\mathcal{T}_h$ be a partition of $\Omega$ into quadrilateral (hexahedral if $d=3$) elements $K$ of diameter $h_{K}\leq h$ and let $\mathcal{E}_h=\mathcal{E}_h^0\cup\mathcal{E}_h^b$ denote the set of (interior and boundary) faces. We restrict the discussion to conforming subdivisions to avoid technicalities already addressed in previous tutorials. The diameter of $e \in \mathcal{E}_h$ is denoted $h_e$. For any integer $k\ge 2$, we introduce the (discontinuous) finite element space

\[
 \mathbb{V}_h\dealcoloneq\left\{v_h\in L^2(\Omega): \,\, v_h|_K\circ F_{K}\in\mathbb{Q}_k \quad \forall \, K \in\mathcal{T}_h \right\},
\]

      where $F_{K}$ is the map from the reference element $\hat{K}$ (unit square/cube) to the physical element $K$. For $v_h\in\mathbb{V}_h$, the piecewise differential operators are denoted with a subscript $h$, for instance $\nabla_h v_h|_K=\nabla(v_h|_K)$ and $D_h^2 v_h=\nabla_h\nabla_h v_h$. For $e\in\mathcal{E}_h$, we assign a normal $\mathbf{n}_e$. The choice of normal is irrelevant except that when $e$ is a boundary face, $\mathbf{n}_e$ is the normal pointing outward from $\Omega$.

      Jumps, averages, and discrete reconstruction of differential operators

      The piecewise differential operators do not have enough information to be accurate approximations of their continuous counterparts. They are missing inter-element information.

      This leads to the introduction of jump and average operators:

\[
 \jump{v_h}|_e \dealcoloneq
 \left\{\begin{array}{ll}
 v_h|_{K_1}-v_h|_{K_2} & e\in\mathcal{E}_h^0 \\
 v_h|_{K_1} & e\in\mathcal{E}_h^b
 \end{array}\right.
 \qquad\mbox{and}\qquad
 \average{v_h}|_e \dealcoloneq
 \left\{\begin{array}{ll}
 \frac{1}{2}(v_h|_{K_1}+v_h|_{K_2}) & e\in\mathcal{E}_h^0 \\
 v_h|_{K_1} & e\in\mathcal{E}_h^b,
 \end{array}\right.
\]

      respectively, where $K_1$ and $K_2$ are the two elements adjacent to $e$ so that $\mathbf{n}_e$ points from $K_1$ to $K_2$ (with the obvious modification when $e$ is a boundary edge). These are the same operators that we have previously used not only in step-47, but also in other tutorials related to discontinuous Galerkin methods (e.g., step-12).

      With these notations, we are now in a position to define the discrete/reconstructed Hessian $H_h(v_h)\in\left[L^2(\Omega)\right]^{d\times d}$ of $v_h\in\mathbb{V}_h$; that is, something that will take the role of $D^2 v$ in the definition of the weak formulation above when moving from the continuous to the discrete formulation. We first consider two local lifting operators $r_e:[L^2(e)]^d\rightarrow[\mathbb{V}_h]^{d\times d}$ and $b_e:L^2(e)\rightarrow[\mathbb{V}_h]^{d\times d}$ defined for $e\in\mathcal{E}_h$ by, respectively,

\[
 r_e\left(\boldsymbol{\phi}\right) \in [\mathbb{V}_h]^{d\times d}: \,
 \int_{\Omega} \tau_h : r_e\left(\boldsymbol{\phi}\right) = \int_e\average{\tau_h}\mathbf{n}_e\cdot\boldsymbol{\phi} \qquad \forall \, \tau_h\in [\mathbb{V}_h]^{d\times d}
\]

      and

\[
 b_e(\phi) \in [\mathbb{V}_h]^{d\times d}: \,
 \int_{\Omega} \tau_h : b_e(\phi) = \int_e\average{{\rm div}\, \tau_h}\cdot\mathbf{n}_e\phi \qquad \forall \, \tau_h\in [\mathbb{V}_h]^{d\times d}.
\]

      We have ${\rm supp}\,(r_e\left(\boldsymbol{\phi}\right))={\rm supp}\,(b_e(\phi))=\omega_e$, where $\omega_e$ denotes the patch of (one or two) elements having $e$ as part of their boundaries.

      The discrete Hessian operator $H_h:\mathbb{V}_h\rightarrow\left[L^2(\Omega)\right]^{2\times 2}$ is then given by

\[
 H_h(v_h) \dealcoloneq D_h^2 v_h -R_h(\jump{\nabla_h v_h})+B_h(\jump{v_h}) \dealcoloneq D_h^2 v_h - \sum_{e\in\mathcal{E}_h}r_e\left(\jump{\nabla_h v_h}\right)+\sum_{e\in\mathcal{E}_h}b_e\left(\jump{v_h}\right).
\]

      Note
      In general, the polynomial degree of the finite element space for the two lifting terms does not need to be the same as the one used for the approximate solution. A different polynomial degree for each lifting term can also be considered.

      Note that other differential operators (e.g., gradient or divergence) can be reconstructed in a similar fashion; see for instance [DiPietro2011].

      Motivation for the lifting operators

      The discrete Hessian $H_h$ is designed such that it weakly converges to the continuous Hessian $D^2$; see the note in the next section for a precise statement. As already mentioned above, the broken Hessian is not a suitable candidate as it contains no information about inter-element jumps. We provide here an informal discussion motivating the definition of the two lifting operators and we refer to [Pryer2014] and [Bonito2021] for more details (although the definitions are slightly different unless the mesh is affine). The goal is then to construct a discrete operator $H_h$ such that for all $\tau\in [C_0^{\infty}(\Omega)]^{d\times d}$ we have

\[
 \int_{\Omega}H_h(v_h):\tau\longrightarrow \int_{\Omega}D^2v:\tau \qquad \mbox{as } \,\, h\rightarrow 0
\]

      for any sequence $\{v_h\}_{h>0}$ in $\mathbb{V}_h$ such that $v_h\rightarrow v$ in $L^2(\Omega)$ as $h\rightarrow 0$ for some $v\in H^2(\Omega)$. Let $\tau\in [C_0^{\infty}(\Omega)]^{d\times d}$. Integrating by parts twice we get

\[
 \int_{\Omega}D^2v:\tau = -\int_{\Omega}\nabla v\cdot \mbox{div}(\tau) = \int_{\Omega}v \mbox{ div}(\mbox{div}(\tau))
\]

      while

\[
 \int_{\Omega}v_h \mbox{ div}(\mbox{div}(\tau)) \longrightarrow \int_{\Omega}v \mbox{ div}(\mbox{div}(\tau)) \qquad \mbox{as } \,\, h\rightarrow 0.
\]

      Now, we integrate the term on the left by parts twice, taking into account that $v_h$ is not necessarily continuous across interior faces. For any $K\in\mathcal{T}_h$ we have

\[
 \int_K v_h \mbox{ div}(\mbox{div}(\tau)) = -\int_K \nabla v_h\cdot \mbox{div}(\tau) + \int_{\partial K} v_h \mbox{ div}(\tau)\cdot \mathbf{n}_K =\int_K D^2v_h:\tau - \int_{\partial K}\nabla v_h\cdot (\tau\mathbf{n}_K) + \int_{\partial K} v_h \mbox{ div}(\tau)\cdot \mathbf{n}_K,
\]

      where $\mathbf{n}_K$ denotes the outward unit normal to $K$. Then, summing over the elements $K\in\mathcal{T}_h$ and using that $\tau$ is smooth, we obtain

\[
 \int_{\Omega} v_h \mbox{ div}(\mbox{div}(\tau)) = \int_{\Omega} D_h^2v_h:\tau - \sum_{e\in\mathcal{E}_h}\int_e\jump{\nabla_h v_h}\cdot \average{\tau}\mathbf{n}_e + \sum_{e\in\mathcal{E}_h}\int_e v_h \average{\mbox{div}(\tau)}\cdot \mathbf{n}_e
\]

      which reveals the motivation for the definition of the two lifting operators: if $\tau$ were an admissible test function, then the right-hand side would be equal to $\int_{\Omega}H_h(v_h):\tau$ and we would have shown the desired (weak) convergence. Actually, if we add and subtract $\tau_h$, the Lagrange interpolant of $\tau$ in $[\mathbb{V}_h\cap H_0^1(\Omega)]^{d\times d}$, we can show that the right-hand side is indeed equal to $\int_{\Omega}H_h(v_h):\tau$ up to terms that tend to zero as $h\rightarrow 0$ under appropriate assumptions on $v_h$.

      It is worth mentioning that defining $H_h$ without the lifting operators $r_e$ and $b_e$ for $e\in\mathcal{E}_h^b$ would not affect the weak convergence property (the integrals over boundary faces are zero since $\tau$ is compactly supported in $\Omega$). However, they are included in $H_h$ to ensure that the solution of the discrete problem introduced in the next section satisfies the homogeneous Dirichlet boundary conditions in the limit $h\rightarrow 0$.

      LDG approximations

      The proposed LDG approximation of the bi-Laplacian problem reads: find $u_h\in\mathbb{V}_h$ such that

\[
 A_h(u_h,v_h)\dealcoloneq a_h(u_h,v_h)+j_h(u_h,v_h) = F_h(v_h) \qquad \forall \, v_h\in\mathbb{V}_h,
\]

      where

\begin{align*}
        a_h(u_h,v_h) & \dealcoloneq \int_{\Omega}H_h(u_h):H_h(v_h), \\
        j_h(u_h,v_h) & \dealcoloneq \gamma_1\sum_{e\in\mathcal{E}_h}h_e^{-1}\int_e\jump{\nabla_h u_h}\cdot\jump{\nabla_h v_h}+\gamma_0\sum_{e\in\mathcal{E}_h}h_e^{-3}\int_e\jump{u_h}\jump{v_h}, \\
        F_h(v_h) & \dealcoloneq \int_{\Omega}fv_h.
\end{align*}

      Here, $\gamma_0,\gamma_1>0$ are penalty parameters.

      Let $\{\varphi_i\}_{i=1}^{N_h}$ be the standard basis functions that generate $\mathbb{V}_h$. We can then express the solution as $u_h=\sum_{j=1}^{N_h}U_j\varphi_j$ and the problem reads: find $\boldsymbol{U}=(U_j)_{j=1}^{N_h}\in\mathbb{R}^{N_h}$ such that

\[
 A\boldsymbol{U} = \boldsymbol{F},
\]

      where $A=(A_{ij})_{i,j=1}^{N_h}\in\mathbb{R}^{N_h\times N_h}$ and $\boldsymbol{F}=(F_i)_{i=1}^{N_h}\in\mathbb{R}^{N_h}$ are defined by

\[
 A_{ij}\dealcoloneq A_h(\varphi_j,\varphi_i) \quad \text{and} \quad F_i\dealcoloneq F_h(\varphi_i), \qquad 1\leq i,j \leq N_h.
\]

        Note
        The sparsity pattern associated with the above LDG method is slightly larger than that of, e.g., the symmetric interior penalty discontinuous Galerkin (SIPG) method. This is because the lifting operators in $H_h$ extend shape functions defined on one cell to the neighboring cell, where they may overlap with the lifted shape functions from a neighbor of the neighbor (see the sketch after this list). However, we have the following interesting properties:
        1. The bilinear form $A_h(\cdot,\cdot)$ is coercive with respect to the DG $H^2$ norm

\[
   \|v_h\|_{H_h^2(\Omega)}^2\dealcoloneq\|D_h^2v_h\|_{L^2(\Omega)}^2+\sum_{e\in\mathcal{E}_h}h_e^{-1}\|\jump{\nabla_h v_h}\|_{L^2(e)}^2+\sum_{e\in\mathcal{E}_h}h_e^{-3}\|\jump{v_h}\|_{L^2(e)}^2
\]

        for any choice of penalty parameters $\gamma_0,\gamma_1>0$. In other words, the stability of the method is ensured for any positive parameters. This is in contrast with interior penalty methods, for which the parameters need to be large enough. (See also the discussions about penalty parameters in the step-39, step-47, and step-74 programs.)
        2. If $\{v_h\}_{h>0}\subset \mathbb{V}_h$ is a sequence uniformly bounded in the $\|\cdot\|_{H_h^2(\Omega)}$ norm such that $v_h\rightarrow v$ in $L^2(\Omega)$ as $h\rightarrow 0$ for some $v\in H^2(\Omega)$, then the discrete Hessian $H_h(v_h)$ weakly converges to $D^2v$ in $[L^2(\Omega)]^{2\times 2}$ as $h\rightarrow 0$. Note that the uniform boundedness assumption implies that the limit $v$ belongs to $H_0^2(\Omega)$.
        3. The use of a reconstructed operator simplifies the design of the numerical algorithm. In particular, no integration by parts is needed to derive the discrete problem. This strategy of replacing differential operators by appropriate discrete counterparts can be applied to nonlinear and more general problems, for instance variational problems without a readily accessible strong formulation. It has been used for instance in [BGNY2020] and [BGNY2021] in the context of large bending deformation of plates.
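
        As a small, hedged illustration of the enlarged stencil, the following sketch (helper name hypothetical) allocates the standard DG flux couplings in deal.II, i.e. each cell with itself and its face neighbors; the lifting terms above additionally couple neighbors of neighbors, so the pattern actually needed here must be extended beyond what is shown.

  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/dofs/dof_tools.h>
  #include <deal.II/lac/dynamic_sparsity_pattern.h>
  #include <deal.II/lac/sparsity_pattern.h>

  using namespace dealii;

  // Allocate cell-plus-face-neighbor couplings, as for standard DG
  // face terms. The LDG lifting operators couple further (neighbors
  // of neighbors), which requires widening this pattern.
  template <int dim>
  void make_base_dg_pattern(const DoFHandler<dim> &dof_handler,
                            SparsityPattern       &sparsity)
  {
    DynamicSparsityPattern dsp(dof_handler.n_dofs());
    DoFTools::make_flux_sparsity_pattern(dof_handler, dsp);
    sparsity.copy_from(dsp);
  }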
        As in step-47, we could consider $C^0$ finite element approximations by replacing FE_DGQ<dim> by FE_Q<dim> (and including the appropriate header file deal.II/fe/fe_q.h) in the program below. In this case, the jump of the basis functions across any interior face is zero, and thus $b_e\left(\jump{\varphi_i}\right)=\mathbf{0}$ for all $e\in\mathcal{E}_h^0$; these terms could therefore be dropped to save computational time. While overkill for the bi-Laplacian problem, the flexibility of fully discontinuous methods combined with reconstructed differential operators is advantageous for nonlinear problems.

        Implementation

        As customary, we assemble the matrix $A$ and the right-hand side $\boldsymbol{F}$ by looping over the elements $K\in\mathcal{T}_h$. Since we are using discontinuous finite elements, the support of each $\varphi_i$ is only one element $K\in\mathcal{T}_h$. However, due to the lifting operators, the support of $H_h(\varphi_i)$ is $K$ plus all the neighbors of $K$ (recall that for $e\in \mathcal{E}_h$, the support of the lifting operators $r_e$ and $b_e$ is $\omega_e$). Therefore, when integrating over a cell $K_c$, we need to consider the interactions between $K_c$ and each of its neighbors (case $d=2$), as sketched below.
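
        A schematic version of the resulting cell loop (a sketch with a hypothetical helper name; the assembly of the coupling blocks themselves is omitted):

  #include <deal.II/dofs/dof_handler.h>

  #include <vector>

  using namespace dealii;

  // Because H_h(phi_i) extends from the cell K_c supporting phi_i
  // onto the neighbors of K_c, the local contributions couple the
  // dofs of K_c with those of each neighbor K_n.
  template <int dim>
  void sketch_coupling_loop(const DoFHandler<dim> &dof_handler)
  {
    const unsigned int dofs_per_cell = dof_handler.get_fe().n_dofs_per_cell();
    std::vector<types::global_dof_index> cell_dofs(dofs_per_cell);
    std::vector<types::global_dof_index> neighbor_dofs(dofs_per_cell);

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        cell->get_dof_indices(cell_dofs);

        for (const unsigned int f : cell->face_indices())
          if (!cell->at_boundary(f))
            {
              const auto neighbor = cell->neighbor(f);
              neighbor->get_dof_indices(neighbor_dofs);
              // assemble the (K_c,K_c), (K_c,K_n) and (K_n,K_c)
              // blocks coming from the lifted shape functions here
            }
      }
  }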

/usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html	2024-01-30 03:04:56.280913309 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html	2024-01-30 03:04:56.280913309 +0000
@@ -128,66 +128,66 @@

        Introduction

        The Cut Finite Element Method

        In this example, we show how to use the cut finite element method (CutFEM) in deal.II. For illustration, we want to solve the simplest possible problem, so we again consider Poisson's equation:

\begin{align*}
   -\Delta u &= f \qquad && \text{in }\, \Omega,
   \\
   u &= u_D       \qquad && \text{on }\, \Gamma = \partial \Omega,
\end{align*}

        where we choose $f(x) = 4$ and $u_D(x) = 1$. CutFEM is an immersed method. In this context, "immersed" means that the mesh is unfitted to the geometry of the domain, $\Omega$. Instead, $\Omega$ floats freely on top of a uniform background mesh, $\mathcal{T}^h$.

        Since we no longer use the mesh to describe the geometry of the domain, we need some other way to represent it. This can be done in several ways but here we assume that $\Omega$ is described by a level set function, $\psi : \mathbb{R}^{\text{dim}} \to \mathbb{R}$ such that

\begin{align*}
   \Omega &= \{x \in \mathbb{R}^{\text{dim}} : \psi(x) < 0 \}, \\
   \Gamma &= \{x \in \mathbb{R}^{\text{dim}} : \psi(x) = 0 \}.
\end{align*}

        For simplicity, we choose $\Omega$ to be a unit disk, so that

\begin{equation*}
   \psi(x) = \| x \| - 1.
\end{equation*}

        As can be seen from the figure below, the level set function is negative for points in $\Omega$, zero on the boundary, and positive everywhere else.

        To solve this problem, we want to distribute degrees of freedom over the smallest submesh, $\mathcal{T}_\Omega^h$, that completely covers the domain:

\begin{equation*}
   \mathcal{T}_\Omega^h = \{ T \in \mathcal{T}^{h} : T \cap \Omega \neq \emptyset \}.
\end{equation*}

        This is usually referred to as the "active mesh".
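
        A hedged sketch of how the active mesh can be identified in deal.II, assuming the NonMatching::MeshClassifier interface that step-85 builds on (the discrete level set vector is assumed to have been set up already):

  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/lac/vector.h>
  #include <deal.II/non_matching/mesh_classifier.h>

  using namespace dealii;

  // Classify the background cells relative to the discrete level set
  // and visit those belonging to the active mesh T_Omega^h, i.e. all
  // cells that are not entirely outside of the domain.
  template <int dim>
  void mark_active_cells(const DoFHandler<dim> &level_set_dof_handler,
                         const Vector<double>  &level_set)
  {
    NonMatching::MeshClassifier<dim> classifier(level_set_dof_handler,
                                                level_set);
    classifier.reclassify();

    for (const auto &cell :
         level_set_dof_handler.get_triangulation().active_cell_iterators())
      if (classifier.location_to_level_set(cell) !=
          NonMatching::LocationToLevelSet::outside)
        {
          // cell is part of the active mesh
        }
  }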

        The finite element space where we want to find our numerical solution, $u_h$, is now

\begin{equation*}
   V_\Omega^h = \{ v \in C(\mathcal{N}_\Omega^h) : v \in Q_p(T), \, T \in \mathcal{T}_\Omega^h \},
\end{equation*}

        where

\begin{equation*}
   \mathcal{N}_\Omega^h = \bigcup_{T \in \mathcal{T}_\Omega^h} \overline{T},
\end{equation*}

        and $\overline{T}$ denotes the closure of $T$. The set $\mathcal{N}_\Omega^h$ is sometimes referred to as the "fictitious domain". Since $\Omega \subset \mathcal{N}_\Omega^h$, we see that the numerical solution is defined over a slightly larger region than the analytical solution.

        In this type of immersed finite element method, the standard way to apply boundary conditions is using Nitsche's method. Multiplying the PDE with a test function, $v_h \in V_\Omega^h$, and integrating by parts over $\Omega$, as usual, gives us

\begin{equation*}
   (\nabla u_h, \nabla v_h)_\Omega - (\partial_n u_h, v_h)_\Gamma = (f,v)_\Omega.
\end{equation*}

        Let $\gamma_D > 0$ be a scalar penalty parameter and let $h$ be some measure of the local cell size. We now note that the following terms are consistent with the Dirichlet boundary condition:

\begin{align*}
   -(u_h, \partial_n v_h)_\Gamma &= -(u_D, \partial_n v_h)_\Gamma, \\
   \left (\frac{\gamma_D}{h} u_h, v_h \right )_\Gamma &= \left (\frac{\gamma_D}{h}u_D, v_h \right )_\Gamma.
\end{align*}

        Thus, we can add these to the weak formulation to enforce the boundary condition. This leads to the following weak formulation: find $u_h \in V_\Omega^h$ such that

\begin{equation*}
   a_h(u_h, v_h) = L_h(v_h), \quad \forall v_h \in V_\Omega^h,
\end{equation*}

        where

\begin{align*}
   a_h(u_h, v_h) &=  (\nabla u_h, \nabla v_h)_\Omega
                   - (\partial_n u_h, v_h)_\Gamma
                   - (u_h, \partial_n v_h)_\Gamma
                   + \left (\frac{\gamma_D}{h} u_h, v_h \right )_\Gamma,
   \\
   L_h(v_h)      &=  (f,v)_\Omega
                   + \left (u_D, \frac{\gamma_D}{h} v_h -\partial_n v_h \right )_\Gamma.
\end{align*}

        In this formulation, there is one big difference compared to a standard boundary-fitted finite element method: on each cell, we need to integrate over the part of the domain and the part of the boundary that falls within the cell. Thus, on each cell intersected by $\Gamma$, we need special quadrature rules that only integrate over these parts of the cell, that is, over $T \cap \Omega$ and $T \cap \Gamma$.

        Since $\Omega \cap T$ is the part of the cell that lies inside the domain, we shall refer to the following regions

\begin{align*}
   \{x \in T : \psi(x) < 0 \}, \\
   \{x \in T : \psi(x) > 0 \}, \\
   \{x \in T : \psi(x) = 0 \},
\end{align*}

        as the "inside", "outside" and the "surface region" of the cell $T$.

        The above finite element method that uses the bilinear form $a_h(\cdot, \cdot)$ is sometimes referred to as the "naive weak formulation" because it suffers from the so-called "small cut problem". Depending on how $\Omega$ is located relative to $\mathcal{T}_h$, a cut between a cell, $T \in \mathcal{T}_h$, and $\Omega$ can become arbitrarily small: $|\Omega \cap T | \rightarrow 0$. For Neumann boundary conditions, the consequence is that the stiffness matrix can become arbitrarily ill-conditioned as the cut size approaches zero. For a Dirichlet condition, the situation is even worse. For any finite choice of Nitsche constant, $\gamma_D$, the bilinear form $a_h(\cdot,\cdot)$ loses coercivity as the size of a cell cut approaches zero. This makes the above weak formulation essentially useless because, as we refine, we typically cannot control how the cells intersect $\Gamma$. One way to avoid this problem is to add a so-called ghost penalty term, $g_h$, to the weak formulation (see e.g. [burman_hansbo_2012] and [cutfem_2015]). This leads to the stabilized cut finite element method, which reads: find $u_h \in V_\Omega^h$ such that

\begin{equation*}
   A_h(u_h, v_h) = L_h(v_h), \quad \forall v_h \in V_\Omega^h,
\end{equation*}

        where

\begin{equation*}
   A_h(u_h,v_h) = a_h(u_h,v_h) + g_h(u_h, v_h).
\end{equation*}

        The point of this ghost penalty is that it makes the numerical method essentially independent of how $\Omega$ relates to the background mesh. In particular, $A_h$ can be shown to be continuous and coercive, with constants that do not depend on how $\Omega$ intersects $\mathcal{T}^h$. To define the ghost penalty, let $\mathcal{T}_\Gamma^h$ be the set of intersected cells:

\begin{equation*}
   \mathcal{T}_{\Gamma}^h = \{ T \in \mathcal{T}_{\Omega}^{h} : T \cap \Gamma \neq \emptyset \},
\end{equation*}

        and let $\mathcal{F}_h$ denote the interior faces of the intersected cells in the active mesh:

\begin{equation*}
   \mathcal{F}_h = \{ F = \overline{T}_+ \cap \overline{T}_- : \,
                      T_+ \in \mathcal{T}_{\Gamma}^h, \,
                      T_- \in \mathcal{T}_{\Omega}^h
                   \}.
\end{equation*}

        The ghost penalty acts on these faces and reads

\begin{equation*}
   g_h(u_h,v_h) = \gamma_A \sum_{F \in \mathcal{F}_h} g_F(u_h, v_h),
\end{equation*}

        where $g_F$ is the face-wise ghost penalty:

\begin{equation*}
   g_F(u_h, v_h) = \gamma_A \sum_{k=0}^p \left(\frac{h_F^{2k-1}}{k!^2}[\partial_n^k u_h], [\partial_n^k v_h] \right)_F.
\end{equation*}

        Here, $\gamma_A$ is a penalty parameter and $h_F$ is some measure of the face size. We see that $g_F$ penalizes the jumps in the face-normal derivatives, $\partial_n^k$, over $F = \overline{T}_+ \cap \overline{T}_-$. Since we include all normal derivatives up to the polynomial degree, we weakly force the piecewise polynomial to behave as a single polynomial over $\overline{T}_+ \cup \overline{T}_-$. Hand-wavingly speaking, this is the reason why we obtain a cut-independent method when we enforce $g_F(u_h, v_h) = 0$ over the faces in $\mathcal{F}_h$. Here, we shall use a continuous space of $Q_1$-elements, so the ghost penalty is reduced to

\begin{equation*}
   g_h(u_h,v_h) = \gamma_A \sum_{F \in \mathcal{F}_h} (h_F [\partial_n u_h], [\partial_n v_h])_F.
\end{equation*}

        Discrete Level Set Function

        A typical use case of a level set method is a problem where the domain is advected in a velocity field, such that the domain deforms with time. For such a problem, one would typically solve for an approximation of the level set function, $\psi_h \in V^h$, in a separate finite element space over the whole background mesh:

\begin{equation*}
   V^h = \{ v \in C(\mathcal{N}^h) : v \in Q_p(T), \, T \in \mathcal{T}^h \},
\end{equation*}

        where $\mathcal{N}^h = \bigcup_{T \in \mathcal{T}^h} \overline{T}$. Even if we solve a much simpler problem with a stationary domain in this tutorial, we shall, just to illustrate, still use a discrete level set function for the Poisson problem. Technically, this is a so-called "variational crime" because we are actually not using the bilinear form $a_h$ but instead

\begin{equation*}
   a_h^\star(u_h, v_h) = (\nabla u_h, \nabla v_h)_{\Omega_h}
                       - (\partial_n u_h, v_h)_{\Gamma_h} + \ldots
\end{equation*}


        This is an approximation of $a_h$ since we integrate over the approximations of the geometry that we get via the discrete level set function:

/usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html	2024-01-30 03:04:56.348913875 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html	2024-01-30 03:04:56.348913875 +0000
@@ -137,7 +137,7 @@

\[
  \beta \cdot \nabla u = f,
\]

        where $\beta$ is a vector field that describes the advection direction and speed (which may be dependent on the space variables if $\beta=\beta(\mathbf x)$), $f$ is a source function, and $u$ is the solution. The physical process that this equation describes is that of a given flow field $\beta$, with which another substance is transported, the density or concentration of which is given by $u$. The equation does not contain diffusion of this second species within its carrier substance, but there are source terms.

        It is obvious that at the inflow, the above equation needs to be augmented by boundary conditions:

        \[
   u = g \qquad\qquad \mathrm{on}\ \partial\Omega_-,
@@ -152,10 +152,10 @@
        and ${\mathbf n}({\mathbf x})$ being the outward normal to the domain at point ${\mathbf x}\in\partial\Omega$. This definition is quite intuitive: since ${\mathbf n}$ points outward, the scalar product with $\beta$ can only be negative if the transport direction $\beta$ points inward, i.e. at the inflow boundary. The mathematical theory states that we must not pose any boundary condition on the outflow part of the boundary.

        Unfortunately, the equation stated above cannot be solved in a stable way using the standard finite element method. The problem is that solutions to this equation possess insufficient regularity perpendicular to the transport direction: while they are smooth along the streamlines defined by the "wind field" $\beta$, they may be discontinuous perpendicular to this direction. This is easy to understand: what the equation $\beta \cdot \nabla u = f$ means is in essence that the rate of change of $u$ in direction $\beta$ equals $f$. But the equation has no implications for the derivatives in the perpendicular direction, and consequently if $u$ is discontinuous at a point on the inflow boundary, then this discontinuity will simply be transported along the streamline of the wind field that starts at this boundary point. These discontinuities lead to numerical instabilities that make a stable solution by a standard continuous finite element discretization impossible.

        A standard approach to address this difficulty is the "streamline-upwind Petrov-Galerkin" (SUPG) method, sometimes also called the streamline diffusion method. A good explanation of the method can be found in [elman2005]. Formally, this method replaces the step in which we derive the weak form of the differential equation from the strong form: instead of multiplying the equation by a test function $v$ and integrating over the domain, we instead multiply by $v + \delta \beta\cdot\nabla v$, where $\delta$ is a parameter that is chosen in the range of the (local) mesh width $h$; good results are usually obtained by setting $\delta=0.1h$. (Why this is called "streamline diffusion" will be explained below; for the moment, let us simply take for granted that this is how we derive a stable discrete formulation.) The value for $\delta$ here is small enough that we do not introduce excessive diffusion, but large enough that the resulting problem is well-posed. A sketch of the corresponding cell assembly follows after the next formula.

        Using the test functions as defined above, an initial weak form of the problem would ask for finding a function $u_h$ so that for all test functions $v_h$ we have

        \[
   (\beta \cdot \nabla u_h, v_h + \delta \beta\cdot\nabla v_h)_\Omega
   =
@@ -176,7 +176,7 @@
   (g, \beta\cdot {\mathbf n} v_h)_{\partial\Omega_-}.
 \]
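
        The cell contribution of this weak form can be assembled as in the following minimal sketch (the helper name and the beta_values/f_values arrays, holding $\beta$ and $f$ at the quadrature points, are assumptions):

  #include <deal.II/fe/fe_values.h>
  #include <deal.II/lac/full_matrix.h>
  #include <deal.II/lac/vector.h>

  #include <vector>

  using namespace dealii;

  // SUPG cell term: every test function v_i is replaced by
  // v_i + delta * (beta . grad v_i).
  template <int dim>
  void assemble_supg_cell(const FEValues<dim>               &fe_values,
                          const std::vector<Tensor<1, dim>> &beta_values,
                          const std::vector<double>         &f_values,
                          const double                       delta,
                          FullMatrix<double>                &cell_matrix,
                          Vector<double>                    &cell_rhs)
  {
    const unsigned int dofs_per_cell = fe_values.dofs_per_cell;

    for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
      for (unsigned int i = 0; i < dofs_per_cell; ++i)
        {
          // modified test function v_i + delta * (beta . grad v_i)
          const double test_i =
            fe_values.shape_value(i, q) +
            delta * (beta_values[q] * fe_values.shape_grad(i, q));

          for (unsigned int j = 0; j < dofs_per_cell; ++j)
            cell_matrix(i, j) +=
              (beta_values[q] * fe_values.shape_grad(j, q)) * test_i *
              fe_values.JxW(q);

          cell_rhs(i) += f_values[q] * test_i * fe_values.JxW(q);
        }
  }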

        Without attempting a justification (see again the literature on the finite element method in general, and the streamline diffusion method in particular), we can combine the equations for the differential equation and the boundary values in the following weak formulation of our stabilized problem: find a discrete function $u_h$ such that for all discrete test functions $v_h$ there holds

        \[
   (\beta \cdot \nabla u_h, v_h + \delta \beta\cdot\nabla v_h)_\Omega
   -
@@ -196,7 +196,7 @@
   (\varphi_i, \beta\cdot {\mathbf n} \varphi_j)_{\partial\Omega_-},
 \]

        with basis functions $\varphi_i,\varphi_j$. However, this is a pitfall that happens to every numerical analyst at least once (including the author): we have here expanded the solution $u_h = \sum_i U_i \varphi_i$, but if we do so, we will have to solve the problem

        \[
   U^T A = F^T,
\]

@@ -275,9 +275,9 @@

        In other words, the unusual choice of test function is equivalent to the addition of a term to the strong form that corresponds to a second order (i.e., diffusion) differential operator in the direction of the wind field $\beta$, i.e., in "streamline direction". A fuller account would also have to explore the effect of the test function on boundary values and why it is necessary to also use the same test function for the right hand side, but the discussion above might make clear where the name "streamline diffusion" for the method originates from.

        Why is this method also called "Petrov-Galerkin"?

        A "Galerkin method" is one where one obtains the weak formulation by multiplying the equation by a test function $v$ (and then integrating over $\Omega$) where the functions $v$ are from the same space as the solution $u$ (though possibly with different boundary values). But this is not strictly necessary: One could also imagine choosing the test functions from a different set of functions, as long as that different set has "as many dimensions" as the original set of functions so that we end up with as many independent equations as there are degrees of freedom (where all of this needs to be appropriately defined in the infinite-dimensional case). Methods that make use of this possibility (i.e., choose the set of test functions differently than the set of solutions) are called "Petrov-Galerkin" methods. In the current case, the test functions all have the form $v+\beta\cdot\nabla v$ where $v$ is from the set of solutions.

        Why is this method also called "streamline-upwind"?

        Upwind methods have a long history in the derivation of stabilized schemes for advection equations. Generally, the idea is that instead of looking at a function "here", we look at it a small distance further "upstream" or "upwind", i.e., where the information "here" originally came from. This might suggest not considering $u(\mathbf x)$, but something like $u(\mathbf x - \delta \beta)$. Or, equivalently upon integration, we could evaluate $u(\mathbf x)$ and instead consider $v$ a bit downstream: $v(\mathbf x+\delta \beta)$. This would be cumbersome for a variety of reasons: First, we would have to define what $v$ should be if $\mathbf x + \delta \beta$ happens to be outside $\Omega$; second, computing integrals numerically would be much more awkward since we no longer evaluate $u$ and $v$ at the same quadrature points. But since we assume that $\delta$ is small, we can do a Taylor expansion:

        \[
   v(\mathbf x + \delta \beta)
   \approx
@@ -288,7 +288,7 @@

        Solving the linear system that corresponds to the advection equation

        As the resulting matrix is no longer symmetric positive definite, we cannot use the usual Conjugate Gradient method (implemented in the SolverCG class) to solve the system. Instead, we use the GMRES (Generalized Minimum RESidual) method (implemented in SolverGMRES) that is suitable for problems of the kind we have here.
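
        A minimal sketch of such a GMRES solve in deal.II (the Jacobi preconditioner shown here is just one simple choice, not necessarily the one step-9 uses):

  #include <deal.II/lac/precondition.h>
  #include <deal.II/lac/solver_control.h>
  #include <deal.II/lac/solver_gmres.h>
  #include <deal.II/lac/sparse_matrix.h>
  #include <deal.II/lac/vector.h>

  using namespace dealii;

  // Solve the nonsymmetric advection system with GMRES.
  void solve(const SparseMatrix<double> &system_matrix,
             Vector<double>             &solution,
             const Vector<double>       &system_rhs)
  {
    SolverControl solver_control(1000, 1e-10 * system_rhs.l2_norm());
    SolverGMRES<Vector<double>> solver(solver_control);

    PreconditionJacobi<SparseMatrix<double>> preconditioner;
    preconditioner.initialize(system_matrix);

    solver.solve(system_matrix, solution, system_rhs, preconditioner);
  }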

        The test case

        For the problem which we will solve in this tutorial program, we use the following domain and functions (in $d=2$ space dimensions):

        \begin{eqnarray*}
   \Omega &=& [-1,1]^d \\
   \beta({\mathbf x})
@@ -350,14 +350,14 @@
        which itself is related to the error size in the energy norm.

        The problem with this error indicator in the present case is that it assumes that the exact solution possesses second derivatives. This is already questionable for solutions to Laplace's problem in some cases, although there most problems allow solutions in $H^2$. If solutions are only in $H^1$, then the second derivatives would be singular in some parts (of lower dimension) of the domain and the error indicators would not reduce there under mesh refinement. Thus, the algorithm would continuously refine the cells around these parts, i.e. would refine into points or lines (in 2d).

        However, for the present case, solutions are usually not even in $H^1$ (and this missing regularity is not the exceptional case as for Laplace's equation), so the error indicator described above is not really applicable. We will thus develop an indicator that is based on a discrete approximation of the gradient. Although the gradient often does not exist, this is the only criterion available to us, at least as long as we use continuous elements as in the present example. To start with, we note that given two cells $K$, $K'$ whose centers are connected by the vector ${\mathbf y}_{KK'}$, we can approximate the directional derivative of a function $u$ as follows:

        \[
   \frac{{\mathbf y}_{KK'}^T}{|{\mathbf y}_{KK'}|} \nabla u
   \approx
   \frac{u(K') - u(K)}{|{\mathbf y}_{KK'}|},
 \]

        where $u(K)$ and $u(K')$ denote $u$ evaluated at the centers of the respective cells. We now multiply the above approximation by ${\mathbf y}_{KK'}/|{\mathbf y}_{KK'}|$ and sum over all neighbors $K'$ of $K$:

        \[
   \underbrace{
     \left(\sum_{K'} \frac{{\mathbf y}_{KK'} {\mathbf y}_{KK'}^T}
@@ -369,7 +369,7 @@
   \frac{u(K') - u(K)}{|{\mathbf y}_{KK'}|}.
 \]

        If the vectors ${\mathbf y}_{KK'}$ connecting $K$ with its neighbors span the whole space (i.e. roughly: $K$ has neighbors in all directions), then the term in parentheses in the left hand side expression forms a regular matrix, which we can invert to obtain an approximation of the gradient of $u$ on $K$:

        \[
   \nabla u
   \approx
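
        deal.II packages this kind of gradient recovery in the DerivativeApproximation namespace; a minimal sketch (the helper name is hypothetical):

  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/lac/vector.h>
  #include <deal.II/numerics/derivative_approximation.h>

  using namespace dealii;

  // Compute the cell-wise norm of the recovered gradient; the result
  // can then be scaled (step-9 multiplies by a power of the cell
  // diameter) and used as a refinement indicator.
  template <int dim>
  void estimate_gradient(const DoFHandler<dim> &dof_handler,
                         const Vector<double>  &solution,
                         Vector<float>         &gradient_norm_per_cell)
  {
    DerivativeApproximation::approximate_gradient(dof_handler,
                                                  solution,
                                                  gradient_norm_per_cell);
  }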
/usr/share/doc/packages/dealii/doxygen/deal.II/structCellData.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structCellData.html	2024-01-30 03:04:56.372914075 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structCellData.html	2024-01-30 03:04:56.372914075 +0000
@@ -127,7 +127,7 @@

        Detailed Description

        template<int structdim>
        struct CellData< structdim >

        The CellData class (and the related SubCellData class) is used to provide a comprehensive, but minimal, description of the cells when creating a triangulation via Triangulation::create_triangulation(). Specifically, each CellData object – describing one cell in a triangulation – has member variables for indices of the $2^d$ vertices (the actual coordinates of the vertices are described in a separate vector passed to Triangulation::create_triangulation(), so the CellData object only needs to store indices into that vector), the material id of the cell that can be used in applications to describe which part of the domain a cell belongs to (see the glossary entry on material ids), and a manifold id that is used to describe the geometry object that is responsible for this cell (see the glossary entry on manifold ids) to describe the manifold this object belongs to.

        This structure is also used to represent data for faces and edges when used as a member of the SubCellData class. In this case, the template argument structdim of an object will be less than the dimension dim of the triangulation. If this is so, then the vertices array represents the indices of the vertices of one face or edge of one of the cells passed to Triangulation::create_triangulation(). Furthermore, for faces the material id has no meaning, and the material_id field is reused to store a boundary_id instead to designate which part of the boundary the face or edge belongs to (see the glossary entry on boundary ids).

        An example showing how this class can be used is in the create_coarse_grid() function of step-14. There are also many more use cases in the implementation of the functions of the GridGenerator namespace.
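
        For instance, a minimal sketch (assuming the usual deal.II headers and namespace; the exact header providing CellData may vary between releases) of creating a one-cell triangulation could look like this:

        #include <deal.II/grid/tria.h>
        #include <deal.II/grid/tria_description.h> // CellData/SubCellData in recent releases

        using namespace dealii;

        void make_single_cell_grid()
        {
          // Four vertex locations of the unit square, in deal.II's vertex ordering.
          const std::vector<Point<2>> vertices = {
            Point<2>(0, 0), Point<2>(1, 0), Point<2>(0, 1), Point<2>(1, 1)};

          // One cell whose 'vertices' member stores indices into the vector above.
          std::vector<CellData<2>> cells(1);
          cells[0].vertices    = {0, 1, 2, 3};
          cells[0].material_id = 0;

          Triangulation<2> triangulation;
          triangulation.create_triangulation(vertices, cells, SubCellData());
        }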

/usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html	2024-01-30 03:04:56.396914275 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html	2024-01-30 03:04:56.396914275 +0000
@@ -141,7 +141,7 @@

        Detailed Description

        template<int dim, int spacedim = dim>
        struct ColorEnriched::Helper< dim, spacedim >

        ColorEnriched::Helper class creates a collection of FE_Enriched finite elements (hp::FECollection) to be used with DoFHandler in a domain with multiple, possibly overlapping, sub-domains with individual enrichment functions. Note that the overlapping regions may have multiple enrichment functions associated with them. This is implemented using a general constructor of FE_Enriched object which allows different enrichment functions.

        Consider a domain with multiple enriched sub-domains which are disjoint, i.e. not connected with each other. To ensure $C^0$ continuity at the interface between the enriched sub-domain (characterized by a single enrichment function) and the non-enriched domain, we can use an FE_Enriched object in the enriched sub-domain and in the non-enriched domain a standard finite element (e.g. FE_Q) wrapped into an FE_Enriched object (which internally uses a dominating FE_Nothing object). Refer to the documentation on FE_Enriched for more information on this. It is to be noted that an FE_Enriched object is constructed using a base FE (FiniteElement objects) and one or more enriched FEs. FE_Nothing is a dummy enriched FE.

        The situation becomes more complicated when two enriched sub-domains share an interface. When the number of enrichment functions are same for the sub-domains, FE_Enriched object of one sub-domain is constructed such that each enriched FE is paired (figuratively) with a FE_Nothing in the FE_Enriched object of the other sub-domain. For example, let the FEs fe_enr1 and fe_enr2, which will be used with enrichment functions, correspond to the two sub-domains. Then the FE_Enriched objects of the two sub-domains are built using [fe_base, fe_enr1, fe_nothing] and [fe_base, fe_nothing, fe_enr2] respectively. Note that the size of the vector of enriched FEs (used in FE_Enriched constructor) is equal to 2, the same as the number of enrichment functions. When the number of enrichment functions is not the same, additional enriched FEs are paired with FE_Nothing. This ensures that the enriched DOF's at the interface are set to zero by the DoFTools::make_hanging_node_constraints() function. Using these two strategies, we construct the appropriate FE_Enriched using the general constructor. Note that this is done on a mesh without hanging nodes.

        Now consider a domain with multiple sub-domains which may share an interface with each other. As discussed previously, the number of enriched FEs in the FE_Enriched object of each sub-domain needs to be equal to the number of sub-domains. This is because we are not using the information of how the domains are connected and any sub-domain may share interface with any other sub-domain (not considering overlaps for now!). However, in general, a given sub-domain shares an interface only with a few sub-domains. This warrants the use of a graph coloring algorithm to reduce the size of the vector of enriched FEs (used in the FE_Enriched constructor). By giving the sub-domains that share no interface the same color, a single 'std::function' that returns different enrichment functions for each sub-domain can be constructed. Then the size of the vector of enriched FEs is equal to the number of different colors used for predicates (or sub-domains).

        Note
        The graph coloring function, SparsityTools::color_sparsity_pattern, used for assigning colors to the sub-domains needs MPI (use Utilities::MPI::MPI_InitFinalize to initialize MPI and the necessary Zoltan setup). The coloring function, based on Zoltan, is a parallel coloring algorithm but is used in serial by SparsityTools::color_sparsity_pattern.
/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html	2024-01-30 03:04:56.420914475 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html	2024-01-30 03:04:56.420914475 +0000
@@ -149,7 +149,7 @@

        In other words, just because we know the value of the spacedim template argument of the current class does not mean that the data type of the cell iterator that is currently being worked on is obvious.

        To make the cell iterator accessible nevertheless, this class uses an object of type boost::any to store the cell iterator. You can think of this as being a void pointer that can point to anything. To use what is being used therefore requires the user to know the data type of the thing being pointed to.

        To make this work, the DataOut and related classes store in objects of the current type a representation of the cell. To get it back out, you would use the get_cell() function that requires you to say, as a template parameter, the dimension of the cell that is currently being processed. This is knowledge you typically have in an application: for example, if your application runs in dim space dimensions and you are currently using the DataOut class, then the cells that are worked on have data type DataOut<dim>::cell_iterator. Consequently, in a postprocessor, you can call inputs.get_cell<dim> . For technical reasons, however, C++ will typically require you to write this as inputs.template get_cell<dim> because the member function we call here requires that we explicitly provide the template argument.

        Let us consider a complete example of a postprocessor that computes the fluid norm of the stress $\|\sigma\| = \|\eta \nabla u\|$ from the viscosity $\eta$ and the gradient of the fluid velocity, $\nabla u$, assuming that the viscosity is something that depends on the cell's material id. This can be done using a class we derive from DataPostprocessorScalar where we overload the DataPostprocessor::evaluate_vector_field() function that receives the values and gradients of the velocity (plus of other solution variables such as the pressure, but let's ignore those for the moment). Then we could use code such as this:

        template <int dim>
        class ComputeStress : public DataPostprocessorScalar<dim>
        {
        public:
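          // The documentation example is cut off at this point in the log;
          // what follows is a hedged sketch of how such a postprocessor
          // could continue. get_viscosity() is a hypothetical
          // application-provided helper mapping a material id to eta.
          ComputeStress()
            : DataPostprocessorScalar<dim>("stress_norm", update_gradients)
          {}

          virtual void evaluate_vector_field(
            const DataPostprocessorInputs::Vector<dim> &input_data,
            std::vector<Vector<double>> &computed_quantities) const override
          {
            // Retrieve the current cell through the boost::any mechanism
            // described above; note the 'template' disambiguator.
            const typename DoFHandler<dim>::cell_iterator cell =
              input_data.template get_cell<dim>();

            const double eta = get_viscosity(cell->material_id());

            // ||eta grad u||: Frobenius norm over the dim velocity
            // components (components 0..dim-1 of the solution).
            for (unsigned int q = 0; q < computed_quantities.size(); ++q)
              {
                double norm_square = 0;
                for (unsigned int d = 0; d < dim; ++d)
                  norm_square +=
                    (eta * input_data.solution_gradients[q][d]).norm_square();
                computed_quantities[q](0) = std::sqrt(norm_square);
              }
          }
        };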
/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html	2024-01-30 03:04:56.444914675 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html	2024-01-30 03:04:56.444914675 +0000
@@ -144,7 +144,7 @@

        Detailed Description

        template<int spacedim>
        struct DataPostprocessorInputs::Vector< spacedim >

        A structure that is used to pass information to DataPostprocessor::evaluate_vector_field(). It contains the values and (if requested) derivatives of a vector-valued solution variable at the evaluation points on a cell or face.

        This class is also used if the solution vector is complex-valued (whether it is scalar- or vector-valued is immaterial in that case) since in that case, the DataOut and related classes take apart the real and imaginary parts of a solution vector. In practice, that means that if a solution vector has $N$ vector components (i.e., there are $N$ functions that form the solution of the PDE you are dealing with; $N$ is not the size of the solution vector), then if the solution is real-valued the solution_values variable below will be an array with as many entries as there are evaluation points on a cell, and each entry is a vector of length $N$ representing the $N$ solution functions evaluated at a point. On the other hand, if the solution is complex-valued (i.e., the vector passed to DataOut::build_patches() has complex-valued entries), then the solution_values member variable of this class will have $2N$ entries for each evaluation point. The first $N$ of these entries represent the real parts of the solution, and the second $N$ entries correspond to the imaginary parts of the solution evaluated at the evaluation point. The same layout is used for the solution_gradients and solution_hessians fields: First the gradients/Hessians of the real components, then all the gradients/Hessians of the imaginary components. There is more information about the subject in the documentation of the DataPostprocessor class itself. step-58 provides an example of how this class is used in a complex-valued situation.

        Through the fields in the CommonInputs base class, this class also makes available access to the locations of evaluations points, normal vectors (if appropriate), and which cell data is currently being evaluated on (also if appropriate).

        Definition at line 401 of file data_postprocessor.h.
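
        As a concrete illustration of the layout described above (a sketch only; 'inputs', 'q', 'c', and 'N' are hypothetical names for the current DataPostprocessorInputs::Vector object, an evaluation point index, a solution component, and the number of PDE components):

        // Entries 0..N-1 of solution_values[q] hold the real parts, entries
        // N..2N-1 the imaginary parts of the N solution components at point q.
        const double               re = inputs.solution_values[q][c];
        const double               im = inputs.solution_values[q][c + N];
        const std::complex<double> u_c(re, im);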

/usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html	2024-01-30 03:04:56.552915574 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html	2024-01-30 03:04:56.556915608 +0000
@@ -1512,7 +1512,7 @@
        dofs $K_c$ $\leftrightarrow$ dofs $K_c$   (stored in stiffness_matrix_cc)
        dofs $K_c$ $\leftrightarrow$ dofs $K_{n_k}$   (stored in stiffness_matrix_cn and stiffness_matrix_nc)
        dofs $K_{n_k}$ $\leftrightarrow$ dofs $K_{n_k}$   (stored in stiffness_matrix_nn)
        Compute the value of the $i$-th $d$-linear (i.e. (bi-,tri-)linear) shape function at location $\xi$.

        @@ -1540,7 +1540,7 @@
        Compute the gradient of the $i$-th $d$-linear (i.e. (bi-,tri-)linear) shape function at location $\xi$.
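
        A small usage sketch of these two static helpers (values shown for the unit square):

        // Bilinear shape function of vertex 0, which is (1-x)(1-y) in 2d,
        // evaluated at the reference point xi = (0.25, 0.75):
        const Point<2>     xi(0.25, 0.75);
        const double       value = GeometryInfo<2>::d_linear_shape_function(xi, 0);
        const Tensor<1, 2> grad =
          GeometryInfo<2>::d_linear_shape_function_gradient(xi, 0);
        // value == (1 - 0.25) * (1 - 0.75) == 0.1875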

        @@ -1571,13 +1571,13 @@

        For a (bi-, tri-)linear mapping from the reference cell, face, or edge to the object specified by the given vertices, compute the alternating form of the transformed unit vectors vertices. For an object of dimensionality dim, there are dim vectors with spacedim components each, and the alternating form is a tensor of rank spacedim-dim that corresponds to the wedge product of the dim unit vectors, and it corresponds to the volume and normal vectors of the mapping from reference element to the element described by the vertices.

        For example, if dim==spacedim==2, then the alternating form is a scalar (because spacedim-dim=0) and its value equals $\mathbf v_1\wedge \mathbf v_2=\mathbf v_1^\perp \cdot\mathbf v_2$, where $\mathbf v_1^\perp$ is a vector that is rotated to the right by 90 degrees from $\mathbf v_1$. If dim==spacedim==3, then the result is again a scalar with value $\mathbf v_1\wedge \mathbf v_2 \wedge \mathbf v_3 = (\mathbf v_1\times \mathbf v_2)\cdot \mathbf v_3$, where $\mathbf v_1, \mathbf v_2, \mathbf v_3$ are the images of the unit vectors at a vertex of the unit dim-dimensional cell under transformation to the dim-dimensional cell in spacedim-dimensional space. In both cases, i.e. for dim==2 or 3, the result happens to equal the determinant of the Jacobian of the mapping from reference cell to cell in real space. Note that it is the actual determinant, not its absolute value as often used in transforming integrals from one coordinate system to another. In particular, if the object specified by the vertices is a parallelogram (i.e. a linear transformation of the reference cell) then the computed values are the same at all vertices and equal the (signed) area of the cell; similarly, for parallelepipeds, it is the volume of the cell.

        Likewise, if we have dim==spacedim-1 (e.g. we have a quad in 3d space, or a line in 2d), then the alternating product denotes the normal vector (i.e. a rank-1 tensor, since spacedim-dim=1) to the object at each vertex, where the normal vector's magnitude denotes the area element of the transformation from the reference object to the object given by the vertices. In particular, if again the mapping from reference object to the object under consideration here is linear (not bi- or trilinear), then the returned vectors are all parallel, perpendicular to the mapped object described by the vertices, and have a magnitude equal to the area/volume of the mapped object. If dim=1, spacedim=2, then the returned value is $\mathbf v_1^\perp$, where $\mathbf v_1$ is the image of the sole unit vector of a line mapped to the line in 2d given by the vertices; if dim=2, spacedim=3, then the returned values are $\mathbf v_1 \wedge \mathbf v_2=\mathbf v_1 \times \mathbf v_2$ where $\mathbf v_1,\mathbf v_2$ are the two three-dimensional vectors that are tangential to the quad mapped into three-dimensional space.

        This function is used in order to determine how distorted a cell is (see the entry on distorted cells in the glossary).
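
        For instance, a short sketch for the undistorted unit square (dim==spacedim==2), where the alternating form at every vertex is the scalar 1, i.e. the Jacobian determinant of the identity mapping:

        const Point<2> vertices[GeometryInfo<2>::vertices_per_cell] = {
          Point<2>(0, 0), Point<2>(1, 0), Point<2>(0, 1), Point<2>(1, 1)};
        Tensor<0, 2> forms[GeometryInfo<2>::vertices_per_cell];
        GeometryInfo<2>::alternating_form_at_vertices(vertices, forms);
        // each forms[v] equals 1.0 for this cell; negative or strongly
        // varying values would indicate a distorted (e.g. inverted) cell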

/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html	2024-01-30 03:04:56.576915774 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html	2024-01-30 03:04:56.576915774 +0000
@@ -203,11 +203,11 @@
        For a level set function, $\psi$, the implicit function theorem states that it is possible to write one of the coordinates $x_i$ as a function of the others if

        $|\frac{\partial \psi}{\partial x_i}| > 0$.

        In practice, the bound we have for the expression in the left-hand side may be near but not equal to zero due to roundoff errors.

        This constant is a safety margin, $C$, that states that the implicit function theorem can be used when

        $|\frac{\partial \psi}{\partial x_i}| > C$

        Thus this constant must be non-negative.

        Definition at line 96 of file quadrature_generator.h.

        @@ -242,8 +242,8 @@
        A constant, $C$, controlling when a level set function, $\psi$, is considered positive or negative definite:

        $\psi(x) >  C \Rightarrow \text{Positive definite}$, $\psi(x) < -C \Rightarrow \text{Negative definite}$.

        Definition at line 111 of file quadrature_generator.h.

        @@ -294,7 +294,7 @@
        This determines how a box is split when this is necessary. If true, the box is split in two, if set to false the box is split into its $2^{dim}$ children.

        Definition at line 129 of file quadrature_generator.h.
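
        A hedged usage sketch of the three parameters discussed above; the member names lower_bound_implicit_function, limit_to_be_definite, and split_in_half are inferred from the descriptions and definition lines quoted here and should be checked against quadrature_generator.h:

        NonMatching::AdditionalQGeneratorData additional_data;
        // Safety margin C in |dpsi/dx_i| > C for the implicit function theorem.
        additional_data.lower_bound_implicit_function = 1e-11;
        // Margin C for declaring psi positive or negative definite on a box.
        additional_data.limit_to_be_definite = 1e-11;
        // Split boxes in two rather than into their 2^dim children.
        additional_data.split_in_half = true;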

/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html	2024-01-30 03:04:56.592915908 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html	2024-01-30 03:04:56.592915908 +0000
@@ -116,14 +116,14 @@

        Detailed Description

        Struct storing UpdateFlags for the 3 regions of a cell, $K$, that is defined by the sign of a level set function, $\psi$:

        \[
 N = \{x \in K : \psi(x) < 0 \}, \\
 P = \{x \in K : \psi(x) > 0 \}, \\
 S = \{x \in K : \psi(x) = 0 \}.
        \]

        As in the QuadratureGenerator class, we refer to $N$, $P$ and $S$ as the inside, outside, and surface region. RegionUpdateFlags is used to describe how the FEValues objects, which are created by NonMatching::FEValues, should be updated.

        Definition at line 58 of file fe_values.h.
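
        For example (a sketch; the members inside, outside, and surface follow the region naming above and the definition lines quoted below):

        NonMatching::RegionUpdateFlags region_update_flags;
        // N = {psi < 0}: request values and JxW weights for volume integrals.
        region_update_flags.inside = update_values | update_JxW_values;
        // S = {psi = 0}: additionally request normal vectors on the interface.
        region_update_flags.surface =
          update_values | update_JxW_values | update_normal_vectors;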

        Constructor & Destructor Documentation

        @@ -159,7 +159,7 @@
        Flags for the region $\{x \in K : \psi(x) < 0 \}$

        Definition at line 68 of file fe_values.h.

        @@ -176,7 +176,7 @@
        Flags for the region $\{x \in K : \psi(x) > 0 \}$

        Definition at line 73 of file fe_values.h.

        @@ -193,7 +193,7 @@
        Flags for the region $\{x \in K : \psi(x) = 0 \}$

        Definition at line 78 of file fe_values.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html	2024-01-30 03:04:56.604916008 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html	2024-01-30 03:04:56.604916008 +0000
@@ -116,10 +116,10 @@

        Detailed Description

        Data representing the best choice of height-function direction, which is returned by the function find_best_height_direction.

        This data consists of a coordinate direction

        $i \in \{0, ..., dim - 1 \}$,

        and lower bound on the absolute value of the derivative of some associated function, f, taken in the above coordinate direction. That is, a bound $C$ such that

        $|\frac{\partial f}{\partial x_i}| > C$,

        holding over some subset of $\mathbb{R}^{dim}$.

        Definition at line 931 of file quadrature_generator.h.

        Constructor & Destructor Documentation

        @@ -172,7 +172,7 @@
        The lower bound on $|\frac{\partial f}{\partial x_i}|$, described above.

        Definition at line 949 of file quadrature_generator.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html	2024-01-30 03:04:56.624916174 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html	2024-01-30 03:04:56.624916174 +0000
@@ -132,10 +132,10 @@
        auto product = t*u;

        The local alias of this structure represents the type the variable product would have.

        Where is this useful

        The purpose of this class is principally to represent the type one needs to use to represent the values or gradients of finite element fields at quadrature points. For example, assume you are storing the values $U_j$ of unknowns in a Vector<float>, then evaluating $u_h(x_q) = \sum_j U_j \varphi_j(x_q)$ at quadrature points results in values $u_h(x_q)$ that need to be stored as double variables because the $U_j$ are float values and the $\varphi_j(x_q)$ are computed as double values, and the products are then double values. On the other hand, if you store your unknowns $U_j$ as std::complex<double> values and you try to evaluate $\nabla u_h(x_q) = \sum_j U_j \nabla\varphi_j(x_q)$ at quadrature points, then the gradients $\nabla u_h(x_q)$ need to be stored as objects of type Tensor<1,dim,std::complex<double>> because that's what you get when you multiply a complex number by a Tensor<1,dim> (the type used to represent the gradient of shape functions of scalar finite elements).

        Likewise, if you are using a vector valued element (with dim components) and the $U_j$ are stored as double variables, then $u_h(x_q) = \sum_j U_j \varphi_j(x_q)$ needs to have type Tensor<1,dim> (because the shape functions have type Tensor<1,dim>). Finally, if you store the $U_j$ as objects of type std::complex<double> and you have a vector valued element, then the gradients $\nabla u_h(x_q) = \sum_j U_j \nabla\varphi_j(x_q)$ will result in objects of type Tensor<2,dim,std::complex<double> >.

        In all of these cases, this type is used to identify which type needs to be used for the result of computing the product of unknowns and the values, gradients, or other properties of shape functions.

        Definition at line 481 of file template_constraints.h.
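
        A compile-time sketch of how this trait is consulted in practice:

        // The product of a complex scalar U_j and a real rank-1 tensor (a
        // shape function gradient) is a complex rank-1 tensor, as described
        // above.
        constexpr int dim = 2;
        using GradientType =
          typename ProductType<std::complex<double>, Tensor<1, dim>>::type;
        static_assert(std::is_same<GradientType,
                                   Tensor<1, dim, std::complex<double>>>::value,
                      "the scalar's number type is folded into the tensor");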

/usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html	2024-01-30 03:04:56.640916307 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html	2024-01-30 03:04:56.640916307 +0000
@@ -186,7 +186,7 @@

        Apply the wrapped preconditioner, i.e., solve $Px=b$ where $x$ is the dst vector and $b$ the src vector.


        Parameters
            dst    Result vector of the preconditioner application

/usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html	2024-01-30 03:04:56.660916474 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html	2024-01-30 03:04:56.660916474 +0000
@@ -326,7 +326,7 @@

        Advance a tuple of iterators by $n$.


        Definition at line 148 of file synchronous_iterator.h.

        @@ -356,7 +356,7 @@
        Advance a tuple of iterators by $n$.

        Definition at line 161 of file synchronous_iterator.h.

        @@ -386,7 +386,7 @@
        Advance a tuple of iterators by $n$.

        Definition at line 175 of file synchronous_iterator.h.

        @@ -506,7 +506,7 @@
        Advance the elements of this iterator by $n$.

        Definition at line 236 of file synchronous_iterator.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1FEEvaluationImplSelector.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1FEEvaluationImplSelector.html	2024-01-30 03:04:56.680916641 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1FEEvaluationImplSelector.html	2024-01-30 03:04:56.680916641 +0000
@@ -116,8 +116,8 @@
 static void evaluate_or_integrate (const unsigned int n_components, const EvaluationFlags::EvaluationFlags evaluation_flag, OtherNumber *values_dofs, FEEvaluationData< dim, Number, false > &fe_eval, const bool sum_into_values_array)

        Detailed Description

        template<int dim, typename Number, bool do_integrate>
        struct internal::FEEvaluationImplSelector< dim, Number, do_integrate >

        This class chooses an appropriate evaluation/integration strategy based on the template parameters and the shape_info variable which contains runtime parameters for the strategy underlying FEEvaluation::evaluate(), i.e. this calls internal::FEEvaluationImpl::evaluate(), internal::FEEvaluationImplCollocation::evaluate() or internal::FEEvaluationImplTransformToCollocation::evaluate() with appropriate template parameters. In case the template parameters fe_degree and n_q_points_1d contain valid information (i.e. fe_degree>-1 and n_q_points_1d>0), we simply pass these values to the respective template specializations. Otherwise, we perform a runtime matching of the runtime parameters to find the correct specialization. This matching currently supports $0\leq fe\_degree \leq 9$ and $degree+1\leq n\_q\_points\_1d\leq fe\_degree+2$.

        Definition at line 2274 of file evaluation_kernels.h.

        Member Function Documentation

/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1MappingInfoStorage.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1MappingInfoStorage.html	2024-01-30 03:04:56.704916841 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1MappingInfoStorage.html	2024-01-30 03:04:56.704916841 +0000
@@ -416,7 +416,7 @@
        The storage of the gradients of the inverse Jacobian transformation. Because of symmetry, only the upper diagonal and diagonal part are needed. The first index runs through the derivatives, starting with the diagonal and then continuing row-wise, i.e., $\partial^2/\partial x_1 \partial x_2$ first, then $\partial^2/\partial x_1 \partial x_3$, and so on. The second index is the spatial coordinate.

        Indexed by data_index_offsets.

        Contains two fields for access from both sides for interior faces, but the default case (cell integrals or boundary integrals) only fills the zeroth component and ignores the first one.

        @@ -437,8 +437,8 @@
        The storage of the gradients of the Jacobian transformation. Because of symmetry, only the upper diagonal and diagonal part are needed. The first index runs through the derivatives, starting with the diagonal and then continuing row-wise, i.e., $\partial^2/\partial x_1 \partial x_2$ first, then $\partial^2/\partial x_1 \partial x_3$, and so on. The second index is the spatial coordinate.

        Indexed by data_index_offsets.

        Contains two fields for access from both sides for interior faces, but the default case (cell integrals or boundary integrals) only fills the zeroth component and ignores the first one.

/usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1UnivariateShapeData.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1UnivariateShapeData.html	2024-01-30 03:04:56.728917041 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structinternal_1_1MatrixFreeFunctions_1_1UnivariateShapeData.html	2024-01-30 03:04:56.728917041 +0000
@@ -170,7 +170,7 @@

        Detailed Description

        template<typename Number>
        struct internal::MatrixFreeFunctions::UnivariateShapeData< Number >

        This struct stores the shape functions, their gradients and Hessians evaluated for a one-dimensional section of a tensor product finite element and tensor product quadrature formula in reference coordinates. This data structure also includes the evaluation of quantities at the cell boundary and on the sub-interval $(0, 0.5)$ and $(0.5, 1)$ for face integrals.

        Definition at line 134 of file shape_info.h.

        Constructor & Destructor Documentation

/usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html	2024-01-30 03:04:56.768917374 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html	2024-01-30 03:04:56.768917374 +0000
@@ -255,7 +255,7 @@

        This method potentially offers the quickest computation if the pathological case is not encountered.

        ql_implicit_shifts 

        The iterative QL algorithm with implicit shifts applied after tridiagonalization of the tensor using the householder method.

        This method offers a compromise between speed of computation and its robustness. This method is particularly useful when the elements of $T$ have greatly varying magnitudes, which would typically lead to a loss of accuracy when computing the smaller eigenvalues.

        jacobi 

        The iterative Jacobi algorithm.

        This method is the most robust of the available options, with reliable results obtained for even the most pathological cases. It is, however, the slowest algorithm of all of those implemented.

        @@ -901,7 +901,7 @@
        $I_2(\mathbf A) = \frac 12 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

        For the kind of arguments to this function, i.e., a symmetric rank-2 tensor of size 2, the result is (counting indices starting at one) $I_2(\mathbf A) = II(\mathbf A) = \frac 12 \left[ (A_{11} + A_{22})^2 - (A_{11}^2+2 A_{12}^2+ A_{22}^2) \right] = A_{11} A_{22} - A_{12}^2$. As expected, for the $2\times 2$ symmetric tensors this function handles, this equals the determinant of the tensor. (This is so because for $2\times 2$ symmetric tensors, there really are only two invariants, so the second and third invariant are the same; the determinant is the third invariant.)

        Definition at line 2917 of file symmetric_tensor.h.
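
        As a quick numerical check of the identity above (using the free function second_invariant() from symmetric_tensor.h):

        SymmetricTensor<2, 2> A;
        A[0][0] = 3.0;
        A[1][1] = 2.0;
        A[0][1] = 1.0; // the symmetric entry A[1][0] is implied

        // I_2(A) = A_11 A_22 - A_12^2 = 3*2 - 1*1 = 5 == determinant(A)
        const double I2 = second_invariant(A);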

        @@ -974,8 +974,8 @@
        Return the eigenvalues of a symmetric $2\times 2$ tensor. The array of eigenvalues is sorted in descending order.

        For $2\times 2$ tensors, the eigenvalues of tensor $\mathbf T$ are the roots of the characteristic polynomial $0 = \lambda^2 - \lambda\;\text{tr}\mathbf{T} + \det \mathbf{T}$ as given by $\lambda_1, \lambda_2 = \frac{1}{2} \left[ \text{tr} \mathbf{T} \pm \sqrt{(\text{tr} \mathbf{T})^2 - 4 \det \mathbf{T}} \right]$.

        Warning
        The algorithm employed here determines the eigenvalues by computing the roots of the characteristic polynomial. In the case that there exists a common root (the eigenvalues are equal), the computation is subject to round-off errors of order $\sqrt{\epsilon}$. As an alternative, the eigenvectors() function provides a more robust, but costly, method to compute the eigenvalues of a symmetric tensor.
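
        A minimal call sketch (eigenvalues() for SymmetricTensor is a free function in symmetric_tensor.h):

        SymmetricTensor<2, 2> T;
        T[0][0] = 2.0;
        T[1][1] = 1.0;
        T[0][1] = 0.5;

        // Roots of lambda^2 - tr(T) lambda + det(T), sorted in descending order.
        const std::array<double, 2> lambda = eigenvalues(T);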
/usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html	2024-01-30 03:04:56.788917541 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html	2024-01-30 03:04:56.788917541 +0000
@@ -220,7 +220,7 @@
        Advance a tuple of iterators by $n$.

        Definition at line 148 of file synchronous_iterator.h.

        @@ -250,7 +250,7 @@
        Advance a tuple of iterators by $n$.

        Definition at line 161 of file synchronous_iterator.h.

        @@ -280,7 +280,7 @@
        Advance a tuple of iterators by $n$.

        Definition at line 175 of file synchronous_iterator.h.

        @@ -400,7 +400,7 @@
        Advance the elements of this iterator by $n$.

        Definition at line 236 of file synchronous_iterator.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html	2024-01-30 03:04:56.816917774 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html	2024-01-30 03:04:56.820917807 +0000
@@ -633,11 +633,11 @@

        Entrywise multiplication of two tensor objects of general rank.

        This multiplication is also called "Hadamard-product" (c.f. https://en.wikipedia.org/wiki/Hadamard_product_(matrices)), and generates a new tensor of size <rank, dim>:

        \[
   \text{result}_{i, j}
   = \text{left}_{i, j}\circ
     \text{right}_{i, j}
        \]

        Template Parameters
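
        A short sketch of this entrywise product (the function described in this entry is schur_product() in tensor.h):

        Tensor<2, 2> left, right;
        left[0][0] = 1.;   left[0][1] = 2.;
        left[1][0] = 3.;   left[1][1] = 4.;
        right[0][0] = 10.; right[0][1] = 20.;
        right[1][0] = 30.; right[1][1] = 40.;

        // result[i][j] == left[i][j] * right[i][j], e.g. result[1][0] == 90.
        const Tensor<2, 2> result = schur_product(left, right);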
        @@ -674,17 +674,17 @@
        The dot product (single contraction) for tensors. This function returns a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

        \[
   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
   = \sum_{k}
     \text{left}_{i_1,\ldots,i_{r1}, k}
     \text{right}_{k, j_1,\ldots,j_{r2}}
        \]

        Note
        For the Tensor class, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, for which the corresponding operator*() performs a double contraction. The origin of the difference in how operator*() is implemented between Tensor and SymmetricTensor is that for the former, the product between two Tensor objects of same rank and dimension results in another Tensor object – that it, operator*() corresponds to the multiplicative group action within the group of tensors. On the other hand, there is no corresponding multiplicative group action with the set of symmetric tensors because, in general, the product of two symmetric tensors is a nonsymmetric tensor. As a consequence, for a mathematician, it is clear that operator*() for symmetric tensors must have a different meaning: namely the dot or scalar product that maps two symmetric tensors of rank 2 to a scalar. This corresponds to the double-dot (colon) operator whose meaning is then extended to the product of any two even-ranked symmetric tensors.
        In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type.

        Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).

        Definition at line 3035 of file tensor.h.

        @@ -714,7 +714,7 @@
        Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

        Definition at line 3061 of file tensor.h.
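
        A small sketch contrasting the two matrix norms defined in this file:

        Tensor<2, 2> T;
        T[0][0] = 1.0; T[0][1] = -2.0;
        T[1][0] = 3.0; T[1][1] =  4.0;

        // Column sums |1|+|3| = 4 and |-2|+|4| = 6, so l1_norm(T) == 6;
        // row sums |1|+|-2| = 3 and |3|+|4| = 7, so linfty_norm(T) == 7.
        const double n1   = l1_norm(T);
        const double ninf = linfty_norm(T);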

/usr/share/doc/packages/dealii/doxygen/deal.II/todo.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/todo.html	2024-01-30 03:04:56.836917940 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/todo.html	2024-01-30 03:04:56.836917940 +0000
@@ -106,20 +106,20 @@
        Class Differentiation::AD::HelperBase< ADNumberTypeCode, ScalarType >
        Make this class thread safe for Sacado number and ADOL-C tapeless numbers (if supported).
        Member DoFTools::ExcFiniteElementsDontMatch ()
        Write description
        Member DoFTools::ExcGridNotCoarser ()
        Write description
        Member DoFTools::ExcGridsDontMatch ()
        Write description
        Member DoFTools::ExcInvalidBoundaryIndicator ()
        Write description
        Member DoFTools::map_support_points_to_dofs (const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof_handler, std::map< Point< spacedim >, types::global_dof_index, Comp > &point_to_index_map)
        This function should generate a multimap, rather than just a map, since several dofs may be located at the same support point. Currently, only the last value in the map returned by map_dofs_to_support_points() for each point will be returned.
        Class FE_ABF< dim >
        Even if this element is implemented for two and three space dimensions, the definition of the node values relies on consistently oriented faces in 3d. Therefore, care should be taken on complicated meshes.
        Class FE_BDM< dim >
        Restriction matrices are missing.

        The 3d version exhibits some numerical instabilities, in particular for higher order

        Class FE_Nedelec< dim >
        Even if this element is implemented for two and three space dimensions, the definition of the node values relies on consistently oriented faces in 3d. Therefore, care should be taken on complicated meshes.
        @@ -128,9 +128,9 @@
        Class FE_RT_Bubbles< dim >
        Implement restriction matrices
        Member FESubfaceValues< dim, spacedim >::ExcFaceHasNoSubfaces ()
        Document this
        Member FESubfaceValues< dim, spacedim >::ExcReinitCalledWithBoundaryFace ()
        Document this
        Member GinkgoWrappers::SolverBase< ValueType, IndexType >::system_matrix
        Templatize based on Matrix type.
        Member internal::TriangulationImplementation::TriaObjects::next_free_pair_object (const Triangulation< dim, spacedim > &tria)
        @@ -142,25 +142,25 @@
        Member LocalIntegrators::Divergence::gradient_residual (Vector< number > &result, const FEValuesBase< dim > &fetest, const std::vector< double > &input, const double factor=1.)
        Verify: The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.
        Class Mapping< dim, spacedim >
        Document what happens in the codimension-1 case.
        Member Mapping< dim, spacedim >::transform (const ArrayView< const Tensor< 2, dim > > &input, const MappingKind kind, const typename Mapping< dim, spacedim >::InternalDataBase &internal, const ArrayView< Tensor< 2, spacedim > > &output) const =0
        The formulas for mapping_covariant_gradient, mapping_contravariant_gradient and mapping_piola_gradient are only true as stated for linear mappings. If, for example, the mapping is bilinear (or has a higher order polynomial degree) then there is a missing term associated with the derivative of $J$.
        Class MatrixBlock< MatrixType >
        Example for the product preconditioner of the pressure Schur complement.
        Member MatrixBlock< MatrixType >::add (const std::vector< size_type > &indices, const FullMatrix< number > &full_matrix, const bool elide_zero_values=true)
        elide_zero_values is currently ignored.
        Member MatrixBlock< MatrixType >::add (const std::vector< size_type > &row_indices, const std::vector< size_type > &col_indices, const FullMatrix< number > &full_matrix, const bool elide_zero_values=true)
        elide_zero_values is currently ignored.
        Member MatrixBlock< MatrixType >::add (const size_type row_index, const std::vector< size_type > &col_indices, const std::vector< number > &values, const bool elide_zero_values=true)
        elide_zero_values is currently ignored.
        Member MatrixCreator::create_boundary_mass_matrix (const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const Quadrature< dim - 1 > &q, SparseMatrix< number > &matrix, const std::map< types::boundary_id, const Function< spacedim, number > * > &boundary_functions, Vector< number > &rhs_vector, std::vector< types::global_dof_index > &dof_to_boundary_mapping, const Function< spacedim, number > *const weight=0, std::vector< unsigned int > component_mapping={})
        This function does not work for finite elements with cell-dependent shape functions.
        Class MeshWorker::Assembler::MGMatrixSimple< MatrixType >
        The matrix structures needed for assembling level matrices with local refinement and continuous elements are missing.
        Class MeshWorker::Assembler::ResidualLocalBlocksToGlobalBlocks< VectorType >
        Comprehensive model currently not implemented.
        Class MeshWorker::DoFInfoBox< dim, DOFINFO >
        Currently, we are storing an object for the cells and two for each face. We could gather all face data pertaining to the cell itself in one object, saving a bit of memory and a few operations, but sacrificing some cleanliness.
        Class MGTransferBase< VectorType >
        update the following documentation, since it does not reflect the latest changes in structure.
        Class PathSearch
/usr/share/doc/packages/dealii/doxygen/deal.tag differs (XML 1.0 document text)
--- old//usr/share/doc/packages/dealii/doxygen/deal.tag	2023-10-24 00:00:00.000000000 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.tag	2023-10-24 00:00:00.000000000 +0000
@@ -460426,12 +460426,12 @@
 changes_between_4_0_and_5_0 Changes between Version 4.0 and 5.0 changes_between_4_0_and_5_0.html
+ incompatible
 changes_between_5_0_and_5_1 Changes between Version 5.0 and 5.1 changes_between_5_0_and_5_1.html
- incompatible
 changes_between_5_1_and_5_2
@@ -460790,6 +460790,7 @@
 step_18.html Quasistaticelasticdeformation Motivationofthemodel
+ Timediscretization
 Updatingthestressvariable Parallelgraphicaloutput Atriangulationwithautomaticpartitioning
@@ -460823,7 +460824,6 @@
 step_19 The step-19 tutorial program step_19.html
- Timediscretization
 Spatialdiscretization Dealingwithparticlesprogrammatically Globaldefinitions

overalldiffered=1 (number of pkgs that are not bit-by-bit identical: 0 is good)
overall=1