%%%%%%%%%%%%%%%%%%%%%%% file typeinst.tex %%%%%%%%%%%%%%%%%%%%%%%%%
%
% This is the LaTeX source for the instructions to authors using
% the LaTeX document class 'llncs.cls' for contributions to
% the Lecture Notes in Computer Sciences series.
% http://www.springer.com/lncs Springer Heidelberg 2006/05/04
%
% It may be used as a template for your own input - copy it
% to a new file with a new name and use it as the basis
% for your article.
%
% NB: the document class 'llncs' has its own and detailed documentation, see
% ftp://ftp.springer.de/data/pubftp/pub/tex/latex/llncs/latex2e/llncsdoc.pdf
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[runningheads,a4paper]{llncs}
\usepackage{amssymb}
\usepackage{graphicx}
\usepackage{listings}
\usepackage{amsmath}
\usepackage{subfigure}
%\usepackage{subcaption}
% \newcommand{\@chapapp}{\relax}%
% \usepackage[titletoc,title]{appendix}
\usepackage{url}
\urldef{\mailsa}\path|{cda,emmmarou,mav}@uth.gr|
\newcommand{\keywords}[1]{\par\addvspace\baselineskip
\noindent\keywordname\enspace\ignorespaces#1}
\lstset{escapeinside={(*@}{@*)}}
\begin{document}
\mainmatter
\title{On PDE problem solving environments \\ for multidomain multiphysics problems}
\titlerunning{PDE problem solving environments for MDMP problems}
\author{Christos Antonopoulos \and Manolis Maroudas \and Manolis Vavalis}
\authorrunning{Antonopoulos, Maroudas, Vavalis}
\institute{University of Thessaly, Department of Electrical and Computer Engineering,\\
Gklavani 37, 38221 Volos, Greece\\
\mailsa\\}
\maketitle
\begin{abstract}
This paper presents the design, prototype implementation and preliminary evaluation of an enhanced meta-computing environment based on the FEniCS project and focused on multi-domain multi-physics problems modeled with partial differential equations. The environment builds on scripting languages and their practices, and on the Service Oriented Architecture paradigm and the associated web services technologies. Our design is generic, covering a wide range of problems, but our proof of concept implementation is restricted to elliptic PDEs in two or three dimensions.
\keywords{problem solving environments, numerical solution of PDEs, scientific high performance meta-computing, numerical software}
\end{abstract}
\section{Introduction}
Advances in hardware and software technologies in the 1980s led to the modern era of scientific modeling and simulation. This era seems to be coming to an end. The simulation needs of both industry and academia are mismatched with the existing software platforms and practices, which to a great extent have remained unchanged for the past several decades. We foresee that this mismatch, together with emerging ICT advances and cultural changes in scientific approaches, will lead to a new generation of modeling and simulation.
This paper proposes approaches for designing, analyzing, implementing and evaluating new simulation frameworks particularly suited to multi-domain and multi-physics (MDMP) problems that have Partial Differential Equations (PDEs) in their foundations. Problems of this type appear frequently in real-world applications. Considering also their heavy computational needs, it seems reasonable to make them more accessible to the programmer while reducing their execution time by using every available device/machine on a system/network.
We focus on designing a software platform that facilitates the numerical solution of PDEs associated with MDMP mathematical models. In particular, we propose an enhanced meta-computing environment based on (a) scripting languages (Python) and their practices, and (b) the Service Oriented Architecture (SOA) paradigm and the associated web services technologies.
Although our design is generic, covering a wide range of problems, our proof of concept implementation is restricted to elliptic PDEs in two or three dimensions. Furthermore, we show that our approach can easily exploit state of the art meta-computing methodologies (Schwarz splitting, hybrid stochastic-deterministic methods, ...), numerical solvers (finite element modules from deal.II, interpolants, ...) and modern computer architectures. In particular, our tool can readily exploit state of the art numerical solvers, including those available in FEniCS \cite{LoggMardalEtAl2012a} and deal.II \cite{BangerthHartmannKanschat2007}, domain decomposition methods with or without overlapping \cite{Gander08schwarzmethods,tsompanopoulou20081620}, Monte Carlo based hybrid solvers \cite{Vavalis2013}, rectangular or curvilinear domains and interfaces, and beyond.
\section{Meta-computing algorithms for MDMP problems}
Traditional linear PDE solvers follow a simple workflow: first discretize the problem (domain and derivatives), then solve the resulting linear algebraic problem. Unfortunately, this approach is not well suited for MDMP problems, since it leads to monolithic PDE solvers that treat the MDMP problem as a coherent whole and do not exploit the ``multi'' nature of the problem. Such solvers are expensive to develop, difficult to maintain and reuse, and their mapping to multi-processing machines is rather challenging.
Meta-computing algorithms provide an attractive alternative. They allow us to exploit the problem's characteristics and view its solution process as a workflow that involves individual, relatively simpler PDE solvers, which are associated with the ``multi'' nature of the problem and can be fine-tuned and easily mapped through high level parallelism.
\subsection{Hybrid, deterministic-stochastic methods} \label{MonteCarlo}
The Monte Carlo method can provide approximate solutions to a variety of mathematical problems, not necessarily with probabilistic content or structure, by performing statistical sampling experiments. About a century has passed since the discovery of methods which, based on the Monte Carlo concept, provide numerical approximations to PDE problems. These methods generate random numbers and, by observing certain of their characteristics and behavior, are capable of calculating approximations to PDE solutions.
It has been proposed (see \cite{Vavalis2013} and references therein) that the above stochastic approach can be combined with traditional PDE solvers to develop a hybrid solver that enjoys several very desirable characteristics.
Given the overall domain $\Omega$ with boundary $\partial\Omega$, the associated PDE and a particular domain of interest $D \subset\Omega$ with interface $\Gamma = \partial D \setminus \partial\Omega$, the main steps of a hybrid stochastic/deterministic solver are:
\begin{description}
\item[Stochastic pre-processing:] Monte Carlo-based walks on spheres inside $\Omega$ compute an approximation of the solution at selected points on $\Gamma$.
\item[Interpolation:] An interpolation procedure uses the solutions computed in the previous step to construct an interpolant of the solution on $\Gamma$, which acts as a boundary condition for the local PDE sub-problems.
\item[Deterministic solving:] Solves each of the independent local PDE sub-problems generated by the above decoupling of the original PDE problem, using a conventional local solver selected for each sub-problem of interest.
\end{description}
Our prototype implementation concentrates on the Poisson equation, restricted to the unit square or unit cube for 2D and 3D problems respectively. It utilizes high quality, state of the art software components that include finite element solvers from the deal.II \cite{BangerthHartmannKanschat2007} library for the deterministic solving step, 2D and 3D interpolants, plot and visualization modules etc.
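To illustrate the stochastic pre-processing step, the following minimal sketch (purely illustrative; it is not part of the platform's API and all names are ours) estimates the solution of the Laplace equation at a single point of the unit square with the walk-on-spheres technique; the Poisson case additionally accumulates contributions of the right hand side along each walk. The individual walks are mutually independent, which is precisely the parallelism a Monte Carlo implementation can exploit.
\begin{lstlisting}[numbers=left,caption={Illustrative walk-on-spheres estimator for the Laplace equation on the unit square.},label={lst:wos_sketch}, basicstyle=\tiny]
import random, math

def walk_on_spheres(x, y, g, eps=1e-3, n_walks=2000):
    """Estimate u(x,y) for the Laplace equation on the unit square with
    Dirichlet data g by averaging the boundary values hit by random walks."""
    total = 0.0
    for _ in range(n_walks):
        px, py = x, y
        d = min(px, py, 1.0 - px, 1.0 - py)   # distance to the boundary
        while d > eps:
            # jump to a uniformly random point on the largest inscribed circle
            theta = random.uniform(0.0, 2.0 * math.pi)
            px += d * math.cos(theta)
            py += d * math.sin(theta)
            d = min(px, py, 1.0 - px, 1.0 - py)
        total += g(px, py)                    # walk terminated near the boundary
    return total / n_walks
\end{lstlisting}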
\subsection{Domain Decomposition Methods} \label{schwarz_alternating}
The classical Schwarz alternating procedure demonstrates the basic mathematical idea of overlapping domain decomposition methods. These methods \cite{Gander08schwarzmethods} are efficient, flexible and well suited for MDMP problems. Several of them are inherently suitable for computing the solution of PDEs in parallel. They offer a reasonable alternative since they are based on a physical decomposition of a global MDMP problem: the global solution is sought by solving the smaller subdomain problems collaboratively and then combining their individual solutions.
Let us consider an example consisting of the domain $\varOmega =\varOmega_1 \cup \varOmega_2$ with possibly different elliptic operators on each subdomain, where $\varGamma_i$ is the internal boundary of subdomain $\varOmega_i$, $i=1,2$.
Schwarz methods are realized through the following iterative procedure for finding the approximate solution in the entire composite domain $\varOmega$. Let $u^n_i$ denote the approximate solution in subdomain $\varOmega_i$, and $f_i$ the restriction of $f$ to $\varOmega_i$. Starting with an initial guess $u^0$, we iterate for $n = 1, 2, \ldots$ to find successive approximate solutions. Assuming, without loss of generality for our approach, that the original MDMP problem consists of the Poisson equation in $\varOmega$ with Dirichlet condition $u = g$ on $\partial\varOmega$, we iterate between the two sub-problems as follows:
\begin{equation}
\begin{split}
-\nabla^2 u^n_1 & = f_1 \quad \text{in} \quad \varOmega_1, \\
u^n_1 & = g \quad \text{on} \quad \partial\varOmega_1 \setminus \varGamma_1,\\
u^n_1 & = u_2^{n-1}|_{\varGamma_1} \quad \text{on} \quad \varGamma_1,
\end{split}
\quad \quad
\begin{split}
-\nabla^2 u^n_2 & = f_2 \quad \text{in} \quad \varOmega_2, \\
u^n_2 & = g \quad \text{on} \quad \partial\varOmega_2 \setminus \varGamma_2,\\
u^n_2 & = u^n_1|_{\varGamma_2} \quad \text{on} \quad \varGamma_2.
\end{split}
\end{equation}
Within each iteration, the two problems continuously update the internal Dirichlet conditions on $\varGamma_1$ and $\varGamma_2$.
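In terms of the FEniCS abstractions used later in this paper, one sweep of this iteration simply re-solves each subproblem with the latest interface data. The following minimal sketch is illustrative only: it assumes that the function spaces, bilinear and linear forms, exterior boundary conditions and interface markers of the two subdomains have already been defined by the user. The additive variant differs only in that both subproblems are solved with the previous iteration's interface data, which makes the two solves independent.
\begin{lstlisting}[numbers=left,caption={Sketch of the classical alternating Schwarz iteration for two overlapping subdomains.},label={lst:schwarz_sketch}, basicstyle=\tiny]
from dolfin import *

def schwarz_alternating(V1, a1, L1, bc_ext1, on_gamma1,
                        V2, a2, L2, bc_ext2, on_gamma2,
                        max_iter=50, tol=1e-6):
    u1, u2 = Function(V1), Function(V2)
    # allow evaluation of each solution on the other subdomain's mesh
    u1.set_allow_extrapolation(True)
    u2.set_allow_extrapolation(True)
    for it in range(max_iter):
        u1_prev = u1.copy(deepcopy=True)
        # subdomain 1: interface data taken from the previous u2
        solve(a1 == L1, u1, [bc_ext1, DirichletBC(V1, u2, on_gamma1)])
        # subdomain 2: interface data taken from the freshly updated u1
        solve(a2 == L2, u2, [bc_ext2, DirichletBC(V2, u1, on_gamma2)])
        if errornorm(u1, u1_prev) < tol:      # simple convergence check
            break
    return u1, u2
\end{lstlisting}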
Note that the classical alternating Schwarz methods usually have limited parallelism. There exist variants (e.g. additive Schwarz methods) whose iteration scheme is inherently parallel, although their rate of convergence is lower.
Interface relaxation methods \cite{tsompanopoulou20081620} are essentially non-overlapping domain decomposition methods that follow the iterative structure of the Schwarz method, but in a more complicated manner. In our meta-computing framework these methods can be treated in a completely analogous way which, due to space limitations, is not presented here.
\section{FEniCS Extensions for MDMP PDE Problems}
The FEniCS project \cite{LoggMardalEtAl2012a} is an open-source collection and integration of software tools specialized in the automated, high quality and high performance solution of differential equations.
The main user interface of FEniCS is Dolfin \cite{LoggWells2010a}, a C++ and Python library. It provides a problem solving environment for models
based on PDEs. It implements core parts of the functionality of FEniCS, including
data structures and algorithms for computational meshes and finite element assembly. It also wraps the functionality of other FEniCS components and external software, and is responsible for the correct
communication between them.
FEniCS targets user-friendly notation and support for rapid development. It supports weak formulations for the representation of PDEs through the Unified Form Language (UFL)~\cite{Alnaes2012a}. UFL is integrated with Dolfin and defines a flexible user interface for defining finite element spaces and expressions for weak forms in a notation close to mathematical notation.
It can handle complicated equations efficiently, and differentiation of expressions and forms is also integrated in the language.
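As an illustration of this notation, a complete Dolfin/UFL specification and solution of a Poisson problem with homogeneous Dirichlet conditions takes only a few lines (a standard, self-contained example, independent of our extensions):
\begin{lstlisting}[numbers=left,caption={A standard Dolfin/UFL formulation and solution of a Poisson problem.},label={lst:ufl_poisson}, basicstyle=\tiny]
from dolfin import *

# mesh and piecewise linear Lagrange elements on the unit square
mesh = UnitSquareMesh(32, 32)
V = FunctionSpace(mesh, 'Lagrange', 1)

# weak form of the Poisson equation with u = 0 on the whole boundary
u, v = TrialFunction(V), TestFunction(V)
f = Expression("x[0]*(1 - x[0])*x[1]*(1 - x[1])")
a = inner(grad(u), grad(v))*dx
L = f*v*dx
bc = DirichletBC(V, 0.0, lambda x, on_boundary: on_boundary)

u_h = Function(V)
solve(a == L, u_h, bc)
\end{lstlisting}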
The main goal of our extensions is to design and implement an open, enhanced meta-computing environment supporting MDMP problems, without changing the back-end of FEniCS (problem assembly, linear algebra solvers etc.). Our platform utilizes and extends the Python user interface of Dolfin, as Python syntax is close to UFL while also being better suited for rapid prototyping. Support for MDMP problems is implemented on top of the existing functionality, either as new Python modules using the available data
structures and classes, or as external dynamically shared C++ libraries, wrapped as Python modules using SWIG \cite{SWIG}.
\subsection{Extensions for MDMP PDE Problems with Overlapping Domains}
We implement the additive Schwarz method and use it as a high level solver for MDMP problems with overlapping domains.
% In contrast with the Monte Carlo method in chapter \ref{MonteCarlo}, the code is written purely in Python; we do not offer a C++ API for this feature.
% \paragraph{Multi overlapping subdomains}
% Due to time limitation, currently the code supports simple domain overlapping schemes, that means that any particular point contributes at most to two different subdomains. Full support for multi domain overlapping areas as shown in figure \ref{fig:multi_overlap}, is in experimental state. As the platform evolves, depending on the types of problems we need to solve, different iterative methods may be added.
% \begin{figure}[ht]
% \begin{center}
% \includegraphics[scale=0.4]{multi_overlap.png}
% \caption{An example of multi overlapping subdomains}
% \label{fig:multi_overlap}
% \end{center}
% \end{figure}
The geometry, interfaces, discretization, boundary values and equations applicable on each subdomain are described using UFL and Dolfin in a separate file per subdomain. This organization treats different subdomains as distinct programming units, facilitating the parallel or distributed solution of the problem on different subdomains. This is particularly helpful in case web services are used, as discussed in Section~\ref{sec:web_services}.
%\paragraph{Python module}
All datatypes used are either pure Python or FEniCS objects; there is no dependence on third-party software libraries at this level.
% The Python module consists of two files:
% \begin{description}
% \item[solverconfig.py] provides the base classes with sanity checks and the API for the solver to function properly.
% \item[solver.py] implements the solving routine among a handful of helper functions that simplify the whole process and further sanity checks to ensure the proper setup of the user's problem.
% \end{description}
% Inside solverconfig.py the module defines the following base classes:
% \begin{description}
% \item[class LogInfo] Its purpose is to keep track of the progress of a particular subdomain. The available information can be written to a user defined logfile.
% \item[class ConfigCommon] Holds separately the configuration of the whole solver. Some of the attributes the user can set are the number of dimensions of the problem, the number of max iterations for the solver, a tolerance value that is used to check for convergence, the filenames of the subdomains which will take part in the solving process, whether the user wants the creation of logfiles and whether they want visual plots of the solutions in each iteration. The class provides some predefined defalut values for all attributes.
% \item[class Config2D] Derives from ConfigCommon, with predefined number of dimensions set to 2. Everything else is the same as the parent class.
% \item[class Config3D] Derives from ConfigCommon, with predefined number of dimensions set to 3. Everything else is the same as the parent class.
% \item[class ConfigCommonProblem] This is the base class the user extends to define each subdomain. There are three methods that need to be overriden. We discuss them analytically in section \ref{subsec:domainAPI}.
% \end{description}
% \subsubsection{Domain API}
% \label{subsec:domainAPI}
Each subdomain object
%that inherits from the ConfigCommonProblem base class
must override
%the following methods. The solver object calls these methods before the actual solving phase begins, in order to gather the appropriate usefull information and setup the appropriate data structures for each subdomain.
a number of methods implicitly called before each invocation of the solver.
\begin{description}
\item[init()]
This method holds the UFL \cite{Alnaes2012a} definition of the subdomain and sets the subdomain's function space and the linear and bilinear forms of the PDE as class attributes.
\item[neighbors()] It provides information to the solver about the other subdomains this subdomain overlaps with, in order for the solver to automatically update the interface values after each iteration.% It returns a Python dictionary with keys the filename of the neighbor subdomain and as value a method that returns the boolean value True only for the nodes on the common boundary of this subdomain and the neighbor subdomain.
\item[boundaries()] It informs the solver about the fixed external boundaries of the subdomain. %It returns a Python list of all the subdomain's external boundaries, each element being a DirichletBC object.
\end{description}
%\subsubsection{Iterative solver}
The entry point of the iterative solver is the solve() method. %as defined inside solver.py.
It takes as arguments an object with the configuration of the solving environment (max iterations, tolerance, etc.) and a Python list of user-defined problem objects.%, all derived from ConfigCommonProblem base class.
% After some initial steps (create logfiles, initialize solution vectors, etc), the main solving routine is called, named \_\_solve(subdomains, config).
% \newpage
% The main points of interest of the iteration algorithm and two of the helper methods are shown below in listing \ref{lst:iterative_code}:
% \begin{lstlisting}[numbers=left,caption={Core code of the iterative algorithm routine},label={lst:iterative_code}, basicstyle=\tiny]
% def __interpolate_interfaces(subdomains):
% for subdomain in subdomains:
% for iface in subdomain.interfaces.itervalues():
% interpolant = interpolate(iface['solution'],subdomain.trial_space())
% iface['interpolant'].vector()[:] = interpolant.vector()
% def __solve_iteration(subdomains):
% for subdomain in subdomains:
% subdomain.solve()
% def __update_interfaces(subdomains):
% for subdomain in subdomains:
% for iface in subdomain.interfaces.itervalues():
% iface['previous'].vector()[:] = iface['current'].vector()
% iface['bc'].apply(iface['current'].vector())
% def __solve(subdomains,config):
% iteration = 0
% iterate = True
% while iterate:
% iteration += 1
% __interpolate_interfaces(subdomains)
% __solve_iteration(subdomains)
% __update_interfaces(subdomains)
% if config.show_solution_plots:
% for subdomain in subdomains:
% plot(subdomain.solution(),title=subdomain.name)
% for subdomain in subdomains:
% if stop_criterion(config,subdomain,iteration):
% iterate = False
% return [ subdomain.solution() for subdomain in subdomains ]
% \end{lstlisting}
After each iteration, for each subdomain solution, the algorithm checks a set of termination criteria evaluating convergence or whether a maximum number of iterations has been reached.
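In outline, the per-interface check combines an optional comparison against a known exact solution, the change of the interface values between consecutive iterations, and the iteration budget; the following is a simplified sketch of the actual routine (normalization and logging are omitted):
\begin{lstlisting}[numbers=left,caption={Simplified sketch of the per-interface termination check.},label={lst:stop_sketch}, basicstyle=\tiny]
from dolfin import errornorm

def interface_converged(config, iface, iteration):
    """iface holds the current and previous interface solutions and,
    optionally, the exact one."""
    if 'exact' in iface and \
       errornorm(iface['exact'], iface['current']) <= config.tol_exact:
        return True            # matched the known exact solution
    if iteration > 1 and \
       errornorm(iface['current'], iface['previous']) <= config.tol_prev:
        return True            # no significant change between iterations
    return iteration >= config.max_iterations   # budget exhausted (failure)
\end{lstlisting}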
% in the following order that may terminate the solving process:
% \begin{enumerate}
% \item If the exact solution is known, check for convergence w.r.t. the user defined tolerance value.
% \item If the errornorm of the current and previous iteration is below a user defined tolerance value.
% \item If the max iterations limit as defined by the user is reached. This is also the only case of failure.
% \end{enumerate}
% Below, in listing \ref{lst:iterative_stop}, we see the structure of the stop\_criterion() method:
% \begin{lstlisting}[numbers=left,caption={Implementation of the stop\_criterion() method},label={lst:iterative_stop}]
% def stop_criterion(config,subdomain,iteration):
% converged = True
% for iface in subdomain.interfaces.itervalues():
% if not __stop_criterion(config,iface,iteration):
% converged = False
% return converged
% def __stop_criterion(config,iface,iteration):
% loginfo = iface['log']
% x = iface['current']
% x_prev = iface['previous']
% x_exact = iface['exact'] if 'exact' in iface else None
% if x_exact:
% err_exact = errornorm(x_exact,x)
% n = norm(x_exact)
% if n != 0:
% err_exact /= n
% err_prev = errornorm(x,x_prev,degree_rise=degree_rise)
% if x_exact and err_exact <= config.tol_exact:
% print "*** matched exact solution ***"
% return True
% if iteration != 1 and err_prev <= config.tol_prev:
% print "*** no change between iterations ***"
% return True
% if iteration > config.max_iterations:
% print "*** max iterations limit reached ***"
% return True
% return False
% \end{lstlisting}
% Note that in order for the stop\_criterion() method to terminate the algorithm, all subdomains must converge for either of the two first criteria. The third criterion is common for all subdomains.
% The solver keeps logfiles for each boundary interface between all overalling subdomains. They keep track of the progress per iteration in a column based format which is suitable to use as input to Gnuplot.
\begin{figure}[ht]
\centering
\subfigure[Subdomains topology]{\label{fig:3Dproblem}
\includegraphics[width=0.3\textwidth]{domains.png}
}
\hspace{0.1\textwidth}
\subfigure[Convergence rate for the two subproblems]{\label{fig:convergence}
\includegraphics[width=0.3\textwidth]{convergence.png}
}
\subfigure[Solution for the sphere subdomain]{\label{fig:sphere}
\includegraphics[width=0.3\textwidth]{sphere.png}
}
\hspace{0.1\textwidth}
\subfigure[Solution for the box subdomain]{\label{fig:box}
\includegraphics[width=0.3\textwidth]{box.png}
}
\caption{A sample 3D problem with two overlapping subdomains}
\label{fig:domains_plots}
\end{figure}
As an example, assume a 3D problem with two subdomains: a sphere and a box that overlap as shown in figure~\ref{fig:3Dproblem}. Figures~\ref{fig:sphere} and~\ref{fig:box} depict the solution using the iterative Schwarz solver, whereas figure~\ref{fig:convergence} depicts the convergence rate for the two subdomains until reaching the user-specified accuracy.
Listing~\ref{lst:iterative_subdomain_defs} outlines the definition of the box subdomain (box3D\_1.py) on top of a common skeleton file. The definition of the sphere subdomain (sphere3D\_1.py) is similar. Listing~\ref{lst:iterative_driver} shows the code that, given the subdomain definitions, drives the iterative solver. The user-required changes on the skeleton and driver are in bold. The subdomains can be -- and are in the example -- configured with different meshes, discretizations and PDEs. Note also that using a remote solver would be completely straightforward, by substituting line 7 with the commented-out lines 5-6.
% Note that optionally, the programmer can also define different solvers for each one of the subdomains.
%For example a skeleton file (circle2D\_1.py) with the definition (in Python) for the circle subdomain in figure \ref{fig:center_rectangle} can be the following as shown in listing \ref{lst:iterative_subdomain_defs}:
\begin{lstlisting}[numbers=left,caption={Box subdomain definition based on a common skeleton.},label={lst:iterative_subdomain_defs}, basicstyle=\tiny]
# user defined methods
(*@\textbf{def OverlappingWithOther(): pass}@*)
(*@\textbf{def userDefinedUFL(): pass}@*)
(*@\textbf{def userDefinedBoundaryCondition(): pass}@*)
# skeleton example
def ExtBC(x,on_boundary):
    return on_boundary and not OverlappingWithOther()
def ExtIface(x,on_boundary):
    return on_boundary and OverlappingWithOther()
class Problem(ConfigCommonProblem):
# Override the API methods init(), neighbors() and boundaries()
def init(self,*args,**kwargs):
self.domain_name = (*@\textbf{'box'}@*)
mesh = (*@\textbf{Mesh(Box(-2,-1,-.5,2,1,.5),128)}@*)
self.V = (*@\textbf{FunctionSpace(mesh,'Lagrange',1)}@*)
        self.a, self.L = (*@\textbf{userDefinedUFL(self.V)}@*)
# return a dictionary with interfaces for each neighbor
def neighbors(self):
(*@\textbf{interface = \{\}}@*)
(*@\textbf{interface[self.domain\_name] = ExtIface}@*)
(*@\textbf{return interface}@*)
# return a list with all the external boundaries
def boundaries(self):
(*@\textbf{bc = DirichletBC(self.V, userDefinedBoundaryCondition(), ExtBC)}@*)
(*@\textbf{return [ bc ]}@*)
\end{lstlisting}
% The user defines a class derived from ConfigCommonProblem and overrides its methods in order to define a particular subdomain. The ConfigCommonProblem class performs some validation checks to ensure that the user specified the minimal information to use this subdomain definition with the iterative solver.
% The skeleton definition is abstract to the geometry of the subdomain. That means that the same skeleton code from listing \ref{lst:iterative_subdomain_defs} can be used to define the rectangle subdomain as well.
% Given two defined subdomains in files sphere3D\_1.py and box3D\_1.py, the driver code that solves them looks like this in listing \ref{lst:iterative_driver}.
\begin{lstlisting}[numbers=left,caption={Code solving a 3D problem with two overlapping subdomains.},label={lst:iterative_driver}, basicstyle=\tiny]
from dolfin import *
import solverconfig
import solver
import hybridmc as hmc
# client = hmc.RemoteClient(wsdl_url)
# client.set_options(timeout=timeout_in_seconds)
client = hmc.LocalClient()
(*@\textbf{import sphere3D\_1 as sphere}@*)
(*@\textbf{import box3D\_1 as box}@*)
(*@\textbf{sp = sphere.Problem(client=client)}@*)
(*@\textbf{bp = box.Problem(client=client)}@*)
(*@\textbf{subdomains=[ sp, bp ]}@*)
(*@\textbf{config = solverconfig.Config3D()}@*)
solver.solve(subdomains=subdomains,config=config)
\end{lstlisting}
\subsection{Hybrid, Deterministic-Stochastic Method Extensions}
We introduce extensions to FEniCS to support hybrid deterministic-stochastic methods. More specifically, a stochastic step is used to evaluate values at interfaces, whereas default FEniCS support can be used for interpolation and solving. This design decouples the stochastic interface estimation from the actual solving and allows it to be implemented on any device (including CPUs, GPUs or even FPGAs) in order to take advantage of the vast parallelism inherently available in Monte-Carlo methods. Our implementation includes a POSIX threads version for CPUs, as well as an OpenCL~\cite{OpenCL} version for any OpenCL-capable device (including CPUs and GPUs).
The functionality is available in Dolfin in the form of an MC class, offering the MC::montecarlo() method. The latter takes a description of the interface as input and outputs estimations for the values on the interface. Both 2D and 3D problems are supported.
% Listing \ref{lst:mc_cpp_problem} shows the base class Problem for the Poisson equation:
% \begin{lstlisting}[numbers=left,caption={C++ base class Problem for problem definition},label={lst:mc_cpp_problem}]
% template <int dim>
% class Problem {
% private:
% std::shared_ptr<const dolfin::Expression> f_expr, q_expr;
% public:
% double D[dim];
% Problem(const double *D,
% std::shared_ptr<const dolfin::Expression> _f = 0,
% std::shared_ptr<const dolfin::Expression> _q = 0) :
% f_expr(_f), q_expr(_q)
% { for (int i=0; i<dim; ++i) this->D[i] = D[i]; }
% double f(const double *_x) {
% dolfin::Array<double> values(1);
% dolfin::Array<double> x(dim,const_cast<double *>(_x));
% f_expr->eval(values,x);
% return values[0];
% }
% double q(const double *_x) {
% dolfin::Array<double> values(1);
% dolfin::Array<double> x(dim,const_cast<double *>(_x));
% q_expr->eval(values,x);
% return values[0];
% }
% };
% \end{lstlisting}
% D holds the lengths per dimension for the rectangle domain $\Omega$, where q() and f() are user defined functions that verify the Poisson's equation $\nabla^2$q(x) = f(x). The parameter x holds the coordinates of a 2D or 3D point to evaluate. $\nabla$ is the Laplace operator.
% Both q() and f() expressions are constructed using the FEniCS API. Listing \ref{lst:UFL_poisson_def} shows a definition in the UFL notation.
% \begin{center}
% \begin{lstlisting}[numbers=left,caption={UFL definition of the same Poisson equation.},label={lst:UFL_poisson_def}, basicstyle=\tiny]
% def Laplacian(expr,x,y):
% dxexpr = diff(expr,x)
% dx2expr = diff(dxexpr,x)
% dyexpr = diff(expr,y)
% dy2expr = diff(dyexpr,y)
% dx2dy2expr = dx2expr + dy2expr
% return dx2dy2expr
% x = variable(Expression("x[0]"))
% y = variable(Expression("x[1]"))
% f = (x)*(x-1)*(y)*(y-1)
% q = -Laplacian(f,x,y)
% \end{lstlisting}
% \end{center}
% For the OpenCL version of the algorithm the platform provides two skeleton OpenCl kernels, one for 2D and one for 3D problems. The platform generates the definitions of q() and f(), appends the proper OpenCL kernel code that uses them, compiles and runs the generated code.
% The f and q expressions in listing \ref{lst:UFL_poisson_def} will generate the following code in \ref{lst:mc_cpp_gen}:
% \begin{lstlisting}[numbers=left,caption={C++ prototype of the montecarlo() method},label={lst:mc_cpp_gen}]
% inline double CPP_CODE_Q(const double *x) { return -2*(x*(x-1) + y*(y-1)); }
% inline double CPP_CODE_F(const double *x) { return x*(x-1)*y*(y-1); }
% \end{lstlisting}
% The C++ prototypes of the montecarlo() method are shown in listing \ref{lst:mc_cpp_proto}:
% \begin{lstlisting}[numbers=left,caption={C++ prototype of the montecarlo() method},label={lst:mc_cpp_proto}]
% // multithread cpu version
% std::vector<double>
% montecarlo(double *D, int dim, double* node_coord, int nof_nodes,
% std::shared_ptr<dolfin::Expression> f,
% std::shared_ptr<dolfin::Expression> q);
% // parallel OpenCL version
% std::vector<double>
% montecarlo(double *D, int dim, double* node_coord, int nof_nodes,
% const std::string &f,
% const std::string &q);
% \end{lstlisting}
% The result (estimated values) is returned to the caller for further processing.
% \paragraph{Python wrapper module}
% The Python module provides a montecarlo() wrapper method that calls the external C++ library throught a wrapper layer generated by SWIG \cite{SWIG}.
The montecarlo() method takes the same arguments as the DirichletBC class of FEniCS, plus a description of the original domain and the subdomain of interest. Using the DirichletBC methods we obtain the points on the interface and call the new montecarlo() method for them. The call returns the estimated values at all interface points (nodes), assigned to a new DirichletBC object. The latter can then be used anywhere in the rest of the program.
% Listing \ref{lst:mc_method_def} shows the implementation of the montecarlo() wrapper method that calls the SWIG generated wrapper layer:
% \begin{lstlisting}[numbers=left,caption={Definition of montecarlo() method},label={lst:mc_method_def}]
% from fenics import *
% import _hybridmc as core
% import hmc_toolbox as tools
% import numpy as np
% def montecarlo(self,V,interface,**kwargs):
% dims = kwargs.get('Omega')
% bc = DirichletBC(V,1.0,interface)
% coords, keys = tools.get_boundary_coords(bc)
% dim = len(dims)
% nof_nodes = len(coords)/dim
% D = np.array(dims, dtype=np.float_)
% node_coord = np.array(coords, dtype=np.float_)
% f, q = kwargs.get('f'), kwargs.get('q')
% if not kwargs.get('OpenCL',False):
% f, q = Expression(f), Expression(q)
% value = core.montecarlo(D,dim,node_coord,nof_nodes,f,q)
% est = Function(V)
% est.vector()[keys] = value
% mcbc = DirichletBC(V,est,interface)
% return mcbc, est
% \end{lstlisting}
% There are some explicit data conversions from Python datatypes to numpy \cite{Dubois+etal:1996} datatypes as we need to guarantee that our data lie in contiguous memory. NumPy is also used internally from the FEniCS Python interface.
% With the proper configuration, SWIG generates the appropriate wrapper code to automatically convert an std::vector to a NumPy array and vice versa.
% We also introduce an extra Python module that implements a few helper functions that glue different components of FEniCS together and hide the unnecessary details from the programmer. It also simplifies the implementation of the web services support as we discuss in chapter \ref{WebServices}.
Listing \ref{lst:mc_example} outlines an example of using the stochastic support introduced in FEniCS to solve a PDE on an internal, rectangular subdomain of the original domain, by stochastically estimating the values at the interface of the internal subdomain. Once again, changes introduced by our extensions are highlighted in bold.
\begin{lstlisting}[numbers=left,caption={Example of montecarlo() method in user code.},label={lst:mc_example}, basicstyle=\tiny]
from dolfin import *
import hybridmc as hmc # the platform's Python module
def onbc(x,on_boundary):
    return on_boundary
def Laplacian(expr,x,y):
    # symbolic Laplacian of a UFL expression in the variables x, y
    return diff(diff(expr,x),x) + diff(diff(expr,y),y)
def mc_test_2D(Omega, Subdomain):
x, y = variable(Expression("x[0]")), variable(Expression("x[1]"))
expr = (x)*(x-1)*(y)*(y-1)
    mesh = Mesh(Subdomain,128)
V = FunctionSpace(mesh,'Lagrange',1)
u, v = TrialFunction(V), TestFunction(V)
f = -Laplacian(expr,x,y)
a = inner(grad(u), grad(v))*dx
L = f*v*dx
# get expressions as strings
f_expr, q_expr = hmc.tools.cppcode(expr,x,y), hmc.tools.cppcode(f,x,y)
(*@\textbf{mcbc, est = client.montecarlo(V, onbc, OpenCL=True, Omega=Omega,}@*)
(*@\textbf{f=f\_expr, q=q\_expr)}@*)
sol_mc = Function(V)
solve(a==L,sol_mc,[ mcbc ])
if __name__ == '__main__':
Omega2D = [ 1., 1. ]
(*@\textbf{SubDomain2D = Rectangle(.4, .8, .4, .8)}@*)
(*@\textbf{client = hmc.LocalClient()}@*)
    mc_test_2D(Omega2D, SubDomain2D)
\end{lstlisting}
The client object created near the end of listing \ref{lst:mc_example} encapsulates the local/remote functionality of the method. We discuss web services and client objects further in section \ref{sec:web_services}.
Figure \ref{fig:mc_plots} depicts the solution provided by the hybrid stochastic/deterministic Monte Carlo-based solver for the sample problem of listing~\ref{lst:mc_example}, as well as the absolute error with respect to a fully deterministic solver for a set of 16 experiments.
\begin{figure}[ht]
\vspace{-0.3in}
\centering
% \subfigure[MonteCarlo estimation on $\Gamma$]{\label{fig:mcbc}
% \includegraphics[width=0.29\textwidth]{mcbc.png} }
\subfigure[Solution of a Monte Carlo-based solver]{\label{fig:mcsol}
\includegraphics[width=0.40\textwidth]{mcsol.png} }
% \subfigure[Error w.r.t. the determinitstic boundary estimation]{\label{fig:mcbc_error}
% \includegraphics[width=0.30\textwidth]{mcbc_error.png} }
\hspace{0.1\textwidth}
\subfigure[Solution error w.r.t. a fully deterministic solver. Different lines correspond to different mesh resolutions]{\label{fig:mcsol_error}
\includegraphics[width=0.40\textwidth]{mcsol_error.png} }
\caption{Hybrid solution and error estimation with respect to a deterministic solver for the sample problem}\label{fig:mc_plots}
\vspace{-0.3in}
\end{figure}
\section{Web Services Layer}\label{sec:web_services}
The scientific community has embraced the Web, enabling researchers as well as practitioners to closely collaborate and share resources. This has resulted primarily in the publication of information, while the availability of computational services has remained rather limited and to a great extent monolithic, mostly in the form of e-science platforms that are expensive to build and difficult to reuse outside their scope and environment.
We envision a radically new way of deploying, publishing, sharing, discovering and re-using scientific computing resources in everyday practice. To that end, we argue for an open, balanced and ever-evolving ecosystem of web services that:
\begin{itemize}
\item relieves consumers from the pain of selecting/installing/running the most appropriate algorithm/software/machine components for their scientific computing needs.
\item allows producers to offer their scientific computing components in an easy to be discovered/packaged/consumed way.
\item enables computing facilities to accommodate a wide range of consumers and producers in an open, dynamic, and value adding manner.
\item advances the science of scientific computing towards problem solving with the optimal available algorithm/implementation/machine combination.
\end{itemize}
% Although the above approach may be applied to many areas of Scientific Computing it seems that it is particularly suited for MDMP problems, at least compared to other classes of problems (e.g. numerical linear algebra problems).
In our study we explore the ability to develop, offer and consume MDMP-related computational modules in a transparent and abstract way, within the platform described above and through the Web.
For this we enhance our platform with a web services layer utilizing the following XML-based standards; for detailed information about web services in general and these standards in particular see~\cite{Papazoglou}.
\begin{description}
\item[SOAP:] (Simple Object Access Protocol) for exchanging structured information among web services over computer networks. It relies on other application-layer protocols, such as HTTP, for message negotiation and transmission.
% \item[WSDL:] (Web Services Description Language) used for describing the functionality a web service offers. It provides a machine-readable description of how the service can be called, what parameters it expects, and what data structures it returns.
\item[WSDL:] (Web Services Description Language) used for describing the functionality a web service offers, how it can be called, what parameters it expects, and what data structures it returns.
\item[ebXML:] (e-business XML) that allows web service providers to publish their services and consumers to query for service availability and description.
\end{description}
The overall scenario for developing an MDMP solver under the SOA paradigm is the following. We first wrap any of the software modules mentioned above (or any other related legacy code) as a web service and publish it in our ebXML registry. This may be accomplished through one of several available Integrated Development Environments (IDEs) or platforms; in particular, we have implemented this task in several different ways, using WSO2, .NET, Eclipse, or Spyne (see details given below), in a systematic manner that makes wrapping and deployment a more or less routine procedure with no particular challenges.
Next, any developer, or even a software agent, can query the ebXML registry for particular services, receive the list of available ones, select the most appropriate one and bind to it automatically through its WSDL file, even at run time.
For a particular wrapper implementation we have selected Spyne \cite{python:spyne}, one of the many existing Python frameworks, and briefly describe our main steps below.
% , a Python toolkit that makes it easy to expose online services that have a well-defined API using multiple protocols and transports and supports WSDL and SOAP.
Listing \ref{lst:ws_mc} shows a simple server function definition that wraps the Monte Carlo method.
The deployment of the server code in listing \ref{lst:ws_mc} can be done as shown in listing \ref{lst:ws_deploy}.
The RemoteClient class (see below) utilizes Suds \cite{python:suds}, a lightweight SOAP-based web service client for Python which reads WSDL files at runtime.
% for encoding/decoding, having as goal to present an RPC-like interface into SOAP-based web services.
% The primary interface of the platform is the RemoteClient class (see below) that utilizes the Suds Client class.
Upon creation, it parses the WSDL and derives from it a representation which is used to provide the user with a service description for message/reply processing.
In order to have a consistent API between local and remote methods, apart from the RemoteClient class, the platform also defines a LocalClient class, as shown below, with the same API methods. In the case of the RemoteClient, the input data are sent to the remote server which in turn responds with the output data.
Listing \ref{lst:ws_clients} shows the base definition of the RemoteClient and LocalClient classes.
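To make the symmetry concrete, the following sketch (illustrative; it condenses the montecarlo() wrapper described in the previous section) shows how the two clients expose the same method, with identical pre- and post-processing and only the actual estimation call differing:
\begin{lstlisting}[numbers=left,caption={Illustrative sketch of the common montecarlo() API of LocalClient and RemoteClient.},label={lst:client_dispatch}, basicstyle=\tiny]
from dolfin import DirichletBC, Expression, Function
from suds.client import Client
import numpy as np
import _hybridmc as core       # SWIG-generated wrapper of the native library
import hmc_toolbox as tools    # platform helpers (interface node extraction)

def _estimate(call, V, interface, **kwargs):
    # shared pre/post-processing: collect the interface nodes, let 'call'
    # produce the estimates and wrap them in a DirichletBC on the interface
    coords, keys = tools.get_boundary_coords(DirichletBC(V, 1.0, interface))
    dims = kwargs.get('Omega')
    D = np.array(dims, dtype=np.float_)
    node_coord = np.array(coords, dtype=np.float_)
    value = call(D, len(dims), node_coord, len(coords)/len(dims), kwargs)
    est = Function(V)
    est.vector()[keys] = value
    return DirichletBC(V, est, interface), est

class LocalClient():
    def __init__(self, *args, **kwargs):
        self.is_local = True
    def montecarlo(self, V, interface, **kwargs):
        def call(D, dim, node_coord, nof_nodes, kw):
            f, q = kw.get('f'), kw.get('q')
            if not kw.get('OpenCL', False):   # CPU path expects Dolfin expressions
                f, q = Expression(f), Expression(q)
            return core.montecarlo(D, dim, node_coord, nof_nodes, f, q)
        return _estimate(call, V, interface, **kwargs)

class RemoteClient(Client):
    def __init__(self, *args, **kwargs):
        self.is_local = False
        Client.__init__(self, *args, **kwargs)
    def montecarlo(self, V, interface, **kwargs):
        def call(D, dim, node_coord, nof_nodes, kw):
            # forward the raw arrays to the SOAP operation published with Spyne
            return self.service.montecarlo(list(D), dim, list(node_coord),
                   nof_nodes, kw.get('f'), kw.get('q'), kw.get('OpenCL', False))
        return _estimate(call, V, interface, **kwargs)
\end{lstlisting}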
% The platform then, defines to both RemoteClient and LocalClient classes, methods that support, either as remote or local features respectively. For example, the wrapper routines that implement the Hybrid method described above, are obtained from the conventional implementation with the changes given below. The only essential change is the actual call to the underlying wrapper method. LocalClient calls the SWIG generated wrapper, where RemoteClient calls the Spyne wrapper.
% inside class RemoteClient: Lines ???-??? in listing 5?? become???
% inside class LocalClient: Lines ???-??? in listing 6?? become???
% The web services may be consumed the same way as they were local function calls. As seen below the only part of the user code that changes is the definition of the client object.
Any underlying implementation differences are transparent to the user, who in both cases receives the result exactly as with a conventional local function call, as shown in listing \ref{lst:ws_create_client}.
\section{Conclusion} \label{Conclusion}
We have designed a meta-computing platform targeting MDMP problems modeled with PDEs, based on (but not limited to) the FEniCS project. The platform provides a high level scripting API in Python that utilizes the FEniCS UFL domain specific language.
Our environment allows domain experts to focus on expressing their models rather than delving into implementation details, programmers to effectively select the most appropriate available software module for a particular component (subdomain) of the problem with respect to its associated single-physics model, and users to efficiently deploy and run MDMP computations on loosely coupled, distributed and heterogeneous compute engines.
We also show how to integrate remote functionality from machines over the web in a consistent and transparent way for the end user, following widely accepted standards. Our generic design allows us to exploit state of the art software libraries and explore new solving approaches for MDMP problems. It essentially allows us to replace the traditional software-library-based viewpoint with an SOA-based one that aggressively promotes meta-computing and software reuse at large.
\section{Acknowledgment}
This research has been co-financed by the European Union (European Social Fund -- ESF) and Greek national funds through the Operational Program ``Education and Lifelong Learning'' of the National Strategic Reference Framework (NSRF) -- Research Funding Program: THALES. Investing in knowledge society through the European Social Fund.
\bibliographystyle{plain}
\bibliography{Bibliography,fenics}
\section*{Appendix}
% \section{A simple server function definition}
\begin{lstlisting}[numbers=left,caption={A simple server function definition.},label={lst:ws_mc}, basicstyle=\tiny]
from spyne import Application, rpc, ServiceBase, Integer, Double, Array, String, Boolean
from spyne.protocol.soap import Soap11
import _hybridmc as core
import numpy as np
from dolfin import Expression
class MDMPService(ServiceBase):
""" 1. convert the input Python lists to numpy arrays
2. call the core method and return output as Python list """
@rpc(Array(Double),Integer,Array(Double),Integer,String,String,Boolean,
_returns=Array(Double))
def montecarlo(ctx, dims, dim, coords, nof_nodes,f,q,OpenCL):
D = np.array(dims, dtype=np.float_)
node_coord = np.array(coords, dtype=np.float_)
if not OpenCL:
f = Expression(f)
q = Expression(q)
return core.montecarlo(D,dim,node_coord,nof_nodes,f,q)
\end{lstlisting}
\begin{lstlisting}[numbers=left,caption={Base definition of RemoteClient and LocalClient. },label={lst:ws_clients}, basicstyle=\tiny]
from suds.client import Client
class RemoteClient(Client):
def __init__(self,*args,**kwargs):
self.is_local = False
Client.__init__(self,*args,**kwargs)
class LocalClient():
def __init__(self,*args,**kwargs):
self.is_local = True
\end{lstlisting}
\begin{lstlisting}[numbers=left,caption={Create client objects from user-code. },label={lst:ws_create_client}, basicstyle=\tiny]
from dolfin import *
import hybridmc as hmc
if use_remote_client:
client = hmc.RemoteClient(wsdl_url)
client.set_options(timeout=90) # ...
else:
client = hmc.LocalClient()
\end{lstlisting}
\begin{lstlisting}[numbers=left,caption={Server deployment.},label={lst:ws_deploy}, basicstyle=\tiny]
from spyne import Application
from spyne.protocol.soap import Soap11
from spyne.server.wsgi import WsgiApplication
from wsgiref.simple_server import make_server
from mdmp_service import MDMPService
import logging
app = Application([MDMPService], 'spyne.examples.hello.soap',
in_protocol=Soap11(validator='lxml'), out_protocol=Soap11())
wsgi_app = WsgiApplication(app)
# logging code omitted
server = make_server('127.0.0.1', 8000, wsgi_app)
server.serve_forever()
\end{lstlisting}
\end{document}