%~Mouliné par MaN_auto v.0.23.0 2020-06-04 11:30:43
\documentclass[AHL,Unicode,longabstracts]{cedram}
%%\usepackage{mathrsfs}
%%\providecommand{\noopsort}[1]{}
%%\newcommand{\noopsort}[1]{}
%%\AtBeginDocument{
%\newtheorem{myname}[cdrthm]{My beautiful theorem} }
\newcommand*{\dd}{\mathrm{d}}
\newcommand*{\dx}{\dd x}
\newcommand*{\dX}{\dd X}
\newcommand*{\dH}{\dd H}
\newcommand*{\dR}{\dd R}
\newcommand*{\dY}{\dd Y}
\newcommand*{\dA}{\dd A}
\newcommand*{\dB}{\dd B}
\newcommand*{\dP}{\dd P}
\newcommand*{\dQ}{\dd Q}
\newcommand*{\dy}{\dd y}
\newcommand*{\dw}{\dd w}
\newcommand*{\dt}{\dd t}
\newcommand*{\dr}{\dd r}
\newcommand*{\df}{\dd f}
\newcommand*{\dz}{\dd z}
\newcommand*{\ddh}{\dd h}
\newcommand*{\domega}{\dd \omega}
\newcommand{\mysum}{\Sigma}
\begin{DefTralics}
\newcommand{\mysum}{\Sigma}
\end{DefTralics}
\newcommand*{\Aff}{\textit{Aff}}
%\DeclareMathOperator{\Aff}{Aff}
\DeclareMathOperator{\id}{id}
\DeclareMathOperator{\const}{const.}
\newcommand*\R{\mathbb{R}}
\newcommand{\Cbb}{\mathbb C}
%%\Cbb
\newcommand*\Z{\mathbb Z}
\newcommand*\N{\mathbb N}
\newcommand{\Pbb}{\mathbb P}
%%\Pbb
%\newcommand*\K{\mathbf{K}}
%\newcommand*\Q{ \mathbb{Q}}
%%\vspace{5mm}
%%\vspace{5mm}
%\newcommand*\cprime{$'$}
%\newcommand*\cprime{$'$}
%\newcommand*\cprime{$'$}
%\newcommand*\cprime{$'$}{ga
%\newcommand*\cprime{$'$}
%\newcommand*\cprime{$'$}
%\newcommand*\cprime{$'$}
%%%%%%%%%%%%%%%%%{\newcommand{\mysum}{\Sigma}%%%%%%%%%%%%%%%%%%%%%
\graphicspath{{./figures/}}
\newcommand*{\mk}{\mkern -1mu}
\newcommand*{\Mk}{\mkern -2mu}
\newcommand*{\mK}{\mkern 1mu}
\newcommand*{\MK}{\mkern 2mu}
\hypersetup{urlcolor=purple, linkcolor=blue, citecolor=red}
\newcommand*{\romanenumi}{\renewcommand*{\theenumi}{\roman{enumi}}}
\newcommand*{\Romanenumi}{\renewcommand*{\theenumi}{\Roman{enumi}}}
\newcommand*{\alphenumi}{\renewcommand*{\theenumi}{\alph{enumi}}}
\newcommand*{\Alphenumi}{\renewcommand*{\theenumi}{\Alph{enumi}}}
\let\oldtilde\tilde
\renewcommand*{\tilde}[1]{\mathchoice{\widetilde{#1}}{\widetilde{#1}}{\oldtilde{#1}}{\oldtilde{#1}}}
\let\oldhat\hat
\renewcommand*{\hat}[1]{\mathchoice{\widehat{#1}}{\widehat{#1}}{\oldhat{#1}}{\oldhat{#1}}}
\let\oldforall\forall
\renewcommand*{\forall}{\mathrel{\oldforall}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\title[On the center-focus problem]{On the center-focus problem for the equation $\frac{\mathrm{d}y}{\mathrm{d}x} + \mysum_{i=1}^{n} a_i(x) y^i = 0, 0\leq x \leq 1$ where $a_i$ are polynomials}
\alttitle{Sur le problème du centre-foyer pour l'équation $\frac{\mathrm{d}y}{\mathrm{d}x} + \mysum_{i=1}^{n} a_i(x) y^i = 0, 0\leq x \leq 1$ où les $a_i$ sont des polynômes}
\keywords{center-focus problem, Abel equation, Liénard equation}
\subjclass{37F75, 34C14, 34C05}
%%[\initial{L.} \lastname{Gavrilov}]
\author{\firstname{Lubomir} \lastname{Gavrilov}}
\address{Institut de Mathématiques de Toulouse\\
UMR5219 Université de Toulouse\\
CNRS UPS IMT\\
31062 Toulouse Cedex 9 (France)}
\email{lubomir.gavrilov@math.univ-toulouse.fr}
\thanks{Research partially supported by the Grant No DN 02-5 of the Bulgarian Fund ``Scientific Research''}
\editor{J. V. Pereira}
\begin{abstract}
We study irreducible components of the set of polynomial plane differential systems with a center, which can be seen as a modern formulation of the classical center-focus problem. The emphasis is given on the interrelation between the geometry of the center set and the Picard--Lefschetz theory of the bifurcation (or Poincaré--Pontryagin--Melnikov) functions. Our main illustrative example is the center-focus problem for the Abel equation on a segment, which is compared to the related polynomial Liénard equation.
\end{abstract}
\begin{altabstract}
Nous étudions les composantes irréductibles de l'ensemble des champs polynomiaux plans avec centre, ce qui peut être vu comme une formulation moderne du problème classique du centre-foyer de Poincaré. L'accent est mis sur l'interrelation entre la géométrie de l'ensemble des centres et la théorie de Picard--Lefschetz des fonctions de bifurcation (ou de Poincaré--Pontryagin--Melnikov). Notre exemple principal est le problème du centre-foyer pour l'équation d'Abel sur un segment, comparée à l'équation de Liénard associée.
\end{altabstract}
\datereceived{2018-11-28}
\daterevised{2019-08-11}
\dateaccepted{2019-11-06}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\maketitle
%%\tableofcontents
%%\newpage
\section{The center-focus problem}
The plane differential system
\begin{align}\label{system}
\dot{x} &= P(x,y), \quad
\dot{y}= Q(x,y)
\end{align}
is said to have a \emph{center} at the singular point $(0,0)$, if in a sufficiently small neighbourhood of this point all orbits are closed. Consider the scalar differential equation
\begin{align}\label{ode}
\frac{\dy}{\dx} + f(x,y) = 0,\quad x\in [0,1]
\end{align}
in which $f(x,0) =0, \forall x\in [0,1]$. The equation~\eqref{ode} is said to have a \emph{center} at $y=0$, if all solutions $y(x)$ starting near the origin, satisfy $y(0)=y(1)$ (the interval $[0,1]$ can be replaced by any closed interval).
\emph{Note on the terminology.} We do not specify here the category to which belong $P, Q, f$. They will be either analytic or polynomial, depending on the context. The base field will be either $\R$ or $\Cbb$ depending on the context too. Most results will be valid for both. Thus, the definition of a center for~\eqref{ode} is the same in the real and in the complex case. In the case of an analytic complex plane vector field~\eqref{system} the ``complex'' definition of a center is less straightforward. We say that the origin is a non-degenerate center, if the vector field has an analytic first integral with a Morse critical point at the origin. If this is the case, we shall also say that~\eqref{system} has a Morse singular point, e.g.~\cite{cerlin96,dul08}. We recall therefore
\begin{defi}
The analytic complex vector field~\eqref{system} is said to have a Morse singular point, if it allows an analytic first integral in a neighbourhood of this point, which has a Morse type singularity.
\end{defi}
If~\eqref{system} has a Morse singular point, then the linear part of~\eqref{system} is diagonalisable with non-zero eigenvalues, that is to say the singular point of the vector field is non-degenerate.
An example is the saddle $x'=x, y'=-y$ which has an analytic first integral $xy$ of Morse type, and hence a Morse critical point. Of course, it is linearly equivalent (over $\Cbb$) to $x'=y,y'=-x$ with first integral $x^2+y^2$ which is the usual linear real center. The advantage to study Morse critical points over $\Cbb$ is that we can use complex analysis and complex algebraic geometry. This is the point of view adopted in these notes.
The two equations~\eqref{system} and~\eqref{ode} are closely related. First, a polar change of variables transforms a plane system~\eqref{system} with a center, to an equivalent equation of the form~\eqref{ode} with a center along the interval $[0,2\pi]$. Second, if the family of functions $f({\,\cdot\,},y), x\in [0,1]$ is replaced by its Fourier series $\hat{f}({\,\cdot\,},y)$ (so $\hat{f}(x+1,y) = \hat{f}(x,y)$) and the equation~\eqref{ode} has a center at $y=0$, then the new system
\begin{align}\label{ode1}
\frac{\dy}{\dx} + \hat{f}(x,y) = 0, \quad(x,y)\in \R / \Z \times \R
\end{align}
will have all its orbits starting near the periodic solution $y=0$ on the cylinder $ \R / \Z \times \R$, periodic too. Of course, if the smooth function $f$ is non-periodic, then the function $\hat{f}$ is only piece-wise continuous in $x$. The transport map of~\eqref{ode} along $[0,1]$ becomes a return map for~\eqref{ode1} and the definition of a limit cycle for~\eqref{ode} is straightforward too. Actually, the scalar equation~\eqref{ode} in which $f$ is a regular function, should be considered as a simplified model of the eventually singular equation
\[
\frac{\dy}{\dx} = \frac{P(x,y)}{Q(x,y)}.
\]
We resume the above considerations in the following definitions, which make sense both on $\R$ or $\Cbb$:
\begin{defi}\label{def1}
Let $ \varphi = \varphi({\,\cdot\,}; x_0,y_0)$ be the general solution of the equation $\dy + f(x,y)\dx=0$ with initial condition $y_0=\varphi(x_0; x_0,y_0)$, on the interval $ [x_0,x_1]$.
%%A: desactivation de l'environnement, description
%%\begin{description}
\begin{enumerate}\romanenumi
\item \label{list1.2.1}The solution $ \varphi = \varphi({\,\cdot\,}; x_0,y_0)$ is said to be periodic iff $\varphi(x_1; x_0,y_0) = y_0$.
\item \label{list1.2.2}The solution $ \varphi = \varphi({\,\cdot\,}; x_0,y_0)$ is said to be a limit cycle, provided that it is periodic and isolated, that is to say there is a neighbourhood of its orbit on $S^1\times \R$ free of periodic solutions.
\item \label{list1.2.3}The map $y \mapsto \varphi(x_1; x_0,y) $ is the first return map of~\eqref{ode} in a neighbourhood of $ y=y_0$.
\item \label{list1.2.4}The equation~\eqref{ode} defines a center in a neighbourhood of the periodic solution $\varphi$ provided that the first return map is the identity map in a neighbourhood of $y_0$. If the return map is not the identity map, then we say that~\eqref{ode} defines a focus at the periodic solution $\varphi$.
\end{enumerate}
%%\end{description}
\end{defi}
The center-focus problem for the equation~\eqref{ode} or~\eqref{system} is, roughly speaking, to distinguish between a center and a focus. The algebro-geometric content of the problem is as follows. Suppose that~\eqref{ode} is polynomial, more precisely
\begin{align}\label{ode2}
\frac{\dy}{\dx} + \sum_{i=0}^m a_i(x) y^{i+1} = 0,\quad a_i \in \Cbb[x], \deg a_i \leq n, x \in [0,1].
\end{align}
The first return map $y \mapsto \varphi(1;0,y)$ is well defined and analytic near the periodic solution $y=0$, and moreover
\[
\varphi(1;0,y) = y + \sum_{n=1}^\infty c_n(a) y^{n+1}.
\]
As we shall see in Theorem~\ref{brf}, under the condition $a_1=0$, the coefficients\linebreak $c_n=c_n(a), n\geq 1$, are \emph{polynomials} in the coefficients of $a_j = a_j(x)$, $j\leq n$. The condition that $\varphi(1;0,{\cdot\,})$ is the identity map determines an infinite number of polynomial relations $\{ c_n(a)=0 \}$ on the coefficients of the polynomials $a_j$. By the Hilbert basis theorem, only a finite number of them are relevant, and they define an algebraic variety (the so called center variety $\mathcal C_{m,n}$) in the vector space of all coefficients of the polynomials $a_j$. The problem is therefore (as formulated by Lins Neto~\cite{lins14} in the context of a polynomial foliation induced by~\eqref{system}):
\[
\text{\emph{Describe the irreducible components of $\mathcal C_{m,n}$.}}
\]
%%A: je rentre une subsection* pour l'intitule de type center, faisant l'introduction du plan de l'article:
%%\begin{center}
%\emph{Describe the irreducible components of} $\mathcal C_{m,n}$.
%\end{center}
%%~ N: Non, il s'agissait de la fin de la phrase précédente
%\pagebreak
%\subsection*{Describe the irreducible components of \texorpdfstring{$\mathcal C_{m,n}$}{Cm,n}}
%%\vspace{1cm}
\goodbreak
The content of the paper is as follows.
In Section~\ref{section2} we give first an explicit formula for the general solution of the equation
\[
\frac{\dy}{\dx} + \sum_{i=0}^n a_i(x) y^{i+1} = 0,\quad 0\leq x \leq 1
\]
in terms of iterated path integrals, see Theorem~\ref{firstintegral}. As a by-product we obtain a formula for the first return map, and explicit center conditions found first by Brudnyi, see Theorem~\ref{brf}.
Section~\ref{section3} is devoted to the perturbation theory of the integrable Abel equation
\[
\frac{\dy}{\dx} = a(x) y^2
\]
with first integral
\[
H= \frac1y + A(x),\quad A(x)= \int a(x) \dx.
\]
It is assumed that $A(0)=A(1)$, so the equation has a center along $[0,1]$. We are interested in the number of limit cycles (isolated solutions, such that $y(0)=y(1)$) which the perturbed equation $
\frac{\dy}{\dx} = a(x) y^2 + \dots $ can have. The center-focus problem for this perturbed equation leads to a well known \emph{polynomial moment problem}. Under general assumptions this problem has an elegant solution, due to Colin Christopher, Theorem~\ref{ccth}, which is presented here in the setting of Abelian integrals of dimension zero.
In Section~\ref{section4} we study irreducible components of the center variety of the polynomial Abel equation (on the interval $[0,1]$)
\begin{align}\label{abel1}
\frac{\dy}{\dx} = p(x) y^2 + q(x) y^3
\end{align}
as well as the center variety of the related Liénard equation (by ``center'' we mean the usual Morse center in a neighbourhood of the origin in $\mathbb C^2$)
\begin{align}\label{lienard1}
\dot{x} = y, \quad \dot{y} = -q(x) - y p(x).
\end{align}
In Section~\ref{section41} we prove that the set of Abel equations coming from ``pull back'' provide irreducible components of the center set, Theorem~\ref{pullback}. These results are inspired by previous contributions of Movasati.
In Section~\ref{section42} we revisit the classical center-focus problem for quadratic vector fields, with special attention to the $Q_4$ component of the center set.
In Section~\ref{section4b} we give a full description of the center set of Liénard type equations~\eqref{lienard1}. These results belong mainly to Cherkas and Christopher, but we present them in the broader context of the present notes. In particular, the base field will be $\Cbb$, see Theorem~\ref{th2a} and~\ref{th2}. The centers found in this way are always of ``pull back'' type. This suggests that the only centers of the related Abel equation~\eqref{abel1} are of ``pull back'' type too, which is the content of the so called Composition Conjecture for the Abel equation~\eqref{abel1}~\cite[p.~444]{bry10} to be discussed in Section~\ref{section43}. In this last section we show, however, that there are scalar Abel equations with a center along $[0,1]$, which can not be obtained by a ``pull back''. These equations have a Darboux type first integral, and their construction is inspired by the study of the $Q_4$ component in Section~\ref{section42}. Among them we find the recent counter-example to the Composition Conjecture mentioned above, found first by Giné, Grau and Santallusia~\cite{ggs18}.\footnote{\label{myfootnote}
The present paper is an extended version of two lectures given during the Zagreb Dynamical Systems Workshop, October 22--26, 2018.}%un espace en trop était introduit par le retour à la ligne.
\section*{Acknowledgement}
The author is obliged to Jean-Pierre Françoise for the illuminating discussions on the center-focus problem. I thank also the anonymous referees for several suggestions, which helped to improve the text.
\section{The first return map and the Brudnyi formula}\label{section2}
In this section we shall describe the return map of~\eqref{ode2} as a power series involving iterated path integrals. We prove an explicit formula, due to Brudnyi~\cite{brud06}, which amounts to solve the differential equation. The classical approach to do this is by the Picard iteration method. If $y_0$ is the initial condition at $x_0$ of the differential equation
\[
\dy = f(x,y) \dx
\]
then the Picard iteration is
\[
y_{n+1}(x) = y_0 + \int_{x_0}^x f(t, y_n(t)) \dt
\]
where $y_n$ tends to the solution of the equation as $n\to \infty$. We illustrate this on the example $\dy = y \dx$. If $y_0$ is the initial condition at $x = 0$ then
\begin{align*}\label{exp}
y_1(x) & = y_0 + \int_0^x y_0 \dt\\
y_2(x) & = y_0 + \int_0^x y_1(t) \dt = y_0 + \int_0^x y_0 \dt + \iint_{0\leq t_2\leq t_1\leq x} y_0 \dt_1 \dt_2.
\end{align*}
As
\[
\int \dots \int_{0 \leq t_n\leq \dots \leq t_1\leq x}y_0 \dt_1 \dots \dt_n = y_0 \frac{x^n}{n!}
\]
we get $y(x) = y_0 e^x$ as expected. The multiple (or iterated) integrals above appear in a similar way in the non-autonomous linear $\dy=a(x) y \dx$, or even non-linear case $\dy=f(x,y) \dx$. The non-linear case is more involved, it is reduced to the linear one, but after introducing infinitely many new variables $y, y^2, y^3, \dots $. To get around this reduction we shall use a simple Ansatz, for which we need a formal definition of iterated integral.
Let $Ass_\omega$ be the graded free associative algebra generated by the infinite dimensional vector space of differential one-forms $\omega = a(x,y) \dx$, $a \in \Cbb\{x,y\}$. Its elements are non-commutative polynomials in such one-forms. The differential operator
\begin{gather*}
D : Ass^1_\omega \to Ass^1_\omega
\\
D(a(x,y) \dx) = \frac{\partial}{\partial y} a(x,y) \dx
\end{gather*}
induces a differential operator on $ Ass_\omega$ which acts by the Leibniz rule. The readers familiar with the Picard--Lefschetz theory will recognize in $D$ an avatar of the covariant derivative of an Abelian differential on the level sets $\{y=c \}_c$.
To save brackets, it is convenient to introduce the following notation
\begin{equation}\label{Domega}
D\omega_1 \omega_2 \dots \omega_n = D(\omega_1 \omega_2 \dots \omega_n)
\end{equation}
so that (using brackets)
\begin{align*}
D \omega_1 \omega_2 =D (\omega_1 \omega_2) &= (D \omega_1) \omega_2 + \omega_1(D \omega_2),
\\
\intertext{and}
D \omega_1 D \omega_2 = D (\omega_1 D \omega_2) &= (D \omega_1)(D \omega_2) + \omega_1 (D^2 \omega_2).
\end{align*}
If we use the notation
\[
D^k\omega = \omega ^{(k)}
\]
then
\[
D \omega_1 \omega_2 = \omega_1' \omega_2+ \omega_1 \omega_2'
\]
and
\[
D \omega_1 D \omega_2 = (\omega_1 \omega_2')' = \omega_1' \omega_2' + \omega_1 \omega_2''.
\]
For $\omega_1\omega_2 \dots \omega_n \in Ass_\omega^n$, $\omega_k= \varphi_k(x,y) \dx$, define the iterated integral $\int_{x_0}^x \omega_1\omega_2 \dots \omega_n$ of length $n$, as equal to
\begin{equation}\label{multipleintegral}
\int \dots \int_{x_0 \leq t_n\leq \dots \leq t_1\leq x} \varphi_1(t_1,y) \dots \varphi_n(t_n,y) \dt_1 \dots \dt_n.
\end{equation}
The iterated integral allows also a recursive definition (hence the name):
\begin{equation}\label{iteratedintegral}
\int_{x_0}^x \omega_n\omega_{n-1} \dots \omega_1 = \int_{x_0}^x (\varphi_n (t) \int_{x_0}^t \omega_{n-1} \dots \omega_1)\dt
\end{equation}
where in the case $n=1$ we have the Riemann integral $\int_{x_0}^x \omega_{1}$. We note, that the usual notation for the multiple integral~\eqref{multipleintegral} is $\int_{x_0}^x \omega_n\omega_{n-1} \dots \omega_1$ on the place of $\int_{x_0}^x \omega_1\omega_2 \dots \omega_n$, see Chen~\cite{chen77} or Hain~\cite{hain87}. The reason to prefer the definition~\eqref{iteratedintegral} is that it is better adapted to applications in differential equations, e.g.~\cite{gavr05}. Recall in this context, that
\[
\int_{x_0}^x \omega_n\omega_{n-1} \dots \omega_1 = (-1)^n \int^{x_0}_x \omega_1\omega_2 \dots \omega_n.
\]
For a short summary of properties of iterated integrals which we use, see~\cite[Appendix]{gavr05}, \cite[Section~2]{gmn09}.
\begin{theo}\label{firstintegral}
With the notation~\eqref{Domega}, a first integral of the differential equation $\dy + f(x,y) \dx = 0$ is given by the following recursively defined convergent series
%%\boxed{}
\begin{align}\label{first2}
\varphi(x_0;x,y) = y + \int_{x_0}^x \omega + \int_{x_0}^x \omega D \omega + \int_{x_0}^x \omega D \omega D \omega +
\dots
\end{align}
where
\[
\omega = f(x,y) \dx.
\]
The general solution of~\eqref{ode} with initial condition $(x_0,y_0)$ is given by
\[
y= \varphi(x;x_0,y_0).
\]
\end{theo}
\begin{exam}
In the linear case
\[
y' + \alpha y = 0 \iff \dy + \alpha y \dx = 0
\]
we obtain
\begin{align*}
\varphi(x_0;x,y) &= y \left(1+ \alpha \int_{x_0}^x \dx + \alpha^2 \int_{x_0}^x \dx.\dx + \dots\right)\\
&= y\left(1 + \alpha(x-x_0) + \alpha^2 \frac{(x-x_0)^2}{2} + \dots\right) =\;y e^{\alpha (x-x_0) }
\end{align*}
and the general solution is
\[
y = \varphi(x;x_0,y_0) =\; y_0 e^{\alpha (x_0-x) }.
\]
In the quadratic case
\[
\dy + 2 x y^2 \dx = 0, \omega = 2 x y^2 \dx
\]
we compute recursively
\begin{align*}
\int_{x_0}^x \omega &= \int_{x_0}^x 2 x y^2 \dx = y^2(x^2-x_0^2) \\
\int_{x_0}^x \omega D\omega & = \int_{x_0}^x 2 x y^2 \dx. 4 x y \dx = y^3 (x^2-x_0^2)^2 \\
\int_{x_0}^x \omega D\omega \dots D\omega &= y^{n+1}(x^2-x_0^2)^n.
\end{align*}
Therefore we get the first integral
\[
\varphi(x_0;x,y) = y + y^2 (x^2-x_0^2) + y^3 (x^2-x_0^2)^2 + \dots
\]
and the corresponding general solution is
\begin{align*}
y= \varphi(x ; x_0,y_0) & = y_0 + y_0^2 (x_0^2-x^2) + y_0^3 (x^2_0-x^2)^2 + \dots \\
& = \frac{y_0}{1-y_0(x_0^2-x^2)}.
\end{align*}
\end{exam}
\begin{proof}[Proof of Theorem~\ref{firstintegral}.]
We first verify, that for every fixed $x_0$, the function $\varphi(x_0;x,y)$ is a first integral:
\begin{align*}
d\varphi(x_0;x,y)&=\frac{ \partial}{\partial x} \varphi(x_0;x,y) \dx + \frac{ \partial}{\partial y} \varphi(x_0;x,y) \dy \\
&= \omega + \omega \int_{x_0}^x D \omega + \omega \int_{x_0}^x D \omega D \omega + \omega \int_{x_0}^x D \omega D \omega D \omega + \dots \\
&\qquad\qquad+ \left(1 + \int_{x_0}^x D \omega + \int_{x_0}^x D \omega D \omega + \int_{x_0}^x D \omega D \omega D \omega + \dots\right) \dy \\
&= (\omega + \dy) \frac{ \partial}{\partial y} \varphi(x_0;x,y) = 0.
\end{align*}
As $\varphi(x_0;x_0,y_0) = y_0$ then the level set $\{ (x,y) : \varphi(x_0;x,y) = y_0 \}$ contains both $(x_0,y_0)$ and $(x,y)$. By symmetry
\[
y= \varphi(x;x_0,y_0)
\]
is the solution of~\eqref{ode} with initial condition $y(x_0)=y_0$. The convergence proof is by standard a priori estimates (omitted).
\end{proof}
Note that for fixed $x_0,x_1$ the two return maps
\[
y \mapsto \varphi(x_1;x_0,y), \quad y \mapsto \varphi(x_0;x_1,y)
\]
are mutually inverse. Therefore $\varphi(x_1;x_0,{\cdot\,}) = \id$ if and only if $ \varphi(x_0;x_1,{\cdot\,}) = \id$. Using Theorem~\ref{firstintegral} we can give explicit center conditions. Assume that
\[
f(x,y) = \sum_{i=1}^\infty a_i(x) y^{i+1}
\]
and develop the return map $ \varphi(x_0;x_1,y) $ as a power series in $y$
\begin{equation}\label{returnmap}
\varphi(x_0;x_1,y) = y + \sum_{n=1}^\infty c_n(a) y^{n+1}.
\end{equation}
If we denote, by abuse of notation, $a_{i}= a_{i}(x) \dx $ then we get for the first few coefficients $c_n(a)$ (compare to~\cite[p.~450]{bry10})
\begin{align*}
c_1(a) & = \int_{x_0}^{x_1} a_1 \\
c_2(a) & = \int_{x_0}^{x_1} a_2 + 2 a_1a_1\\
c_3(a) & = \int_{x_0}^{x_1} a_3 + 2 a_2 a_1 + 3 a_1a_2 + 6 a_1^3 \\
c_4(a) & = \int_{x_0}^{x_1} a_4 + 2 a_3 a_1 + 3 a_2^2 + 4 a_1a_3 + 6a_2a_1^2 + 8 a_1a_2a_1 + 12 a_1^2a_2 + 24 a_1^4
\end{align*}
and so on. The general form of the coefficients $c_n(a)$ is found immediately from Theorem~\ref{firstintegral}. We resume this in the following
\begin{theo}[Brudnyi's formula~\cite{brud06}]\label{brf}
The coefficients $c_n(a)$ of the first return map~\eqref{returnmap} for the differential equation
\[
\frac{\dy}{\dx} + \sum_{i=1}^\infty a_i(x) y^{i+1} = 0, x\in [x_0,x_1]
\]
are given by the formulae
\[
c_n(a) = \sum_{i_1+\dots + i_k=n} c_{i_1,\dots,i_k} \int_{x_0}^{x_1} a_{i_1} \cdots a_{i_k}
\]
where
\begin{align*}
c_{i_1} & = 1\\
c_{i_1,i_2} & = i_2+1 \\
c_{i_1,i_2,i_3} & = (i_3+1)(i_2+1)\\
& \;\:\vdots \\
c_{i_1,\dots,i_k} & = (i_k+1)(i_k+i_{k-1} +1) \dots (i_k + \cdots + i_2 +1).
\end{align*}
\end{theo}
The above formula was deduced first by Brudnyi~\cite[p.~422]{brud06} in an equivalent form, see also~\cite[Proposition~2.4]{bry10} in the case~\eqref{abel}.
\begin{coro}
The equation~\eqref{ode} has a center on the interval $[x_0,x_1]$ if and only if $c_n(a)=0$, for every integer $ n \geq 1$.
\end{coro}
\begin{exam}
Suppose that the equation
\[
\frac{\dy}{\dx} + a_1(x)y^2 + a_2(x)y^3 + \dots = 0
\]
has a center on the interval $[x_0,x_1]$. Then, using as above the notation $a_{i}= a_{i}(x) \dx $ we have
\begin{align*}
c_1&= \int_{x_0}^{x_1} a_1 = 0 \\
c_2& = \int_{x_0}^{x_1} a_2 + 2 \int_{x_0}^{x_1} a_1^2 = 0.
\end{align*}
The identity
\[
2 \int_{x_0}^{x_1} a_1^2 = \left(\int_{x_0}^{x_1} a_1\right)^2
\]
implies then, that $ \int_{x_0}^{x_1} a_2 = 0$.
If we consider more specifically the Abel equation
\begin{equation}\label{abelex}
\frac{\dy}{\dx} + a_1(x)y^2 + a_2(x)y^3 = 0
\end{equation}
then taking into consideration that $\int_{x_0}^{x_1} a_1^3 = 0$ and
\[
\int_{x_0}^{x_1} (a_2 a_1 + 3a_1a_2) = \int_{x_0}^{x_1} a_1 a_2 = 0
\]
we obtain $c_3= \int_{x_0}^{x_1} a_1 a_2 $. Therefore a necessary condition for the Abel equation~\eqref{abelex} to have a center on $[x_0,x_1]$ is
\begin{equation}\label{nc}
\int_{x_0}^{x_1} a_1 = 0,\quad \int_{x_0}^{x_1} a_2 = 0, \quad \int_{x_0}^{x_1} a_1 a_2 = 0
\end{equation}
If we suppose that $a_1$, $a_2$ are polynomials of degree at most two, these conditions are also sufficient. The case $\deg a_1, a_2 = 3$ can be studied similarly, see~\cite{bfy98,bfy99,bfy00}.
\end{exam}
In general, an obvious sufficient condition to have a center is therefore
\begin{equation}\label{ucenter}
\int_{x_0}^{x_1} a_{i_1} \cdots a_{i_k} = 0,\quad \forall i_j, k \geq 1.
\end{equation}
Centers with the property~\eqref{ucenter} were called \emph{universal} in~\cite{brud06}.
Consider, more specifically, the following equation with polynomial coefficients $a_i$
\begin{equation}\label{an}
\dy + \sum_{i=1}^n y^{i+1} a_i(x) \dx = 0,\quad a_i(x) \in K[x].
\end{equation}
\begin{theo}[{\cite[Corollary~1.20]{brud06}}]\label{universal}
The polynomial equation~\eqref{an} has a universal center on the interval $[x_0,x_1]$, if and only if, it is a pull back of some polynomial equation
\begin{equation}\label{bn}
\dy = \left(\sum_{i=1}^n b_i(\xi) y^{i+1}\right) \dd\xi,\quad b_i(\xi) \in K[\xi],
\end{equation}
via a suitable polynomial map $\xi = \xi (x)$ having the property $\xi(x_0)=\xi(x_1)$.
\end{theo}
Not all centers of~\eqref{an} are universal, as discovered recently in~\cite{ggs18}.
\section{Bifurcation functions related to Abel equation and a Theorem of Christopher}\label{section3}
In this section we study the following perturbed Abel differential equation on the interval $[0,1]$
\[
y' = a(x) y^2 - \sum_{j= 1}^\infty \varepsilon^j\left(y^2 p_j(x)+ y^3 q_j(x)\right)
\]
or equivalently
\begin{equation}\label{perturbed1}
\frac{\dy}{y^2} = a(x) \dx - \varepsilon \omega_1 - \varepsilon^2\omega_2 - \dots
\end{equation}
where
\begin{gather*}
\omega_j = (p_j(x)+ y q_j(x)) \dx,\quad
a=a(x),\quad p_j =p_j(x),\quad q_j=q_j(x)
\end{gather*}
are polynomials of degree
\[
\deg a = n, \deg p_j \leq n, \deg q_j \leq n
\]
and $\varepsilon$ is a small parameter, see~\cite{bfy98,bfy99,bfy00,chri00,yomd03}. For\linebreak $\varepsilon = 0$, equation~\eqref{perturbed1} has a first integral
\[
H(x,y)= \frac1y + A(x), \quad A(x) = \int a(x) \dx.
\]
%%A: je rentre un env de type enonce pour l'intitule de type center, cite ci dessous:
%%\begin{center}
%%\emph{How many limit cycles has the perturbed system~\eqref{perturbed1} on the interval $[0,1]$? }
%%\end{center}
\begin{enonce*}[plain]{Question}
How many limit cycles can the perturbed system~\eqref{perturbed1} have on the interval $[0,1]$?
\end{enonce*}
%%\vspace{0,5cm}
Recall from the preceding section that a solution $y(x)$ such that $y(0)=y(1)$, is called periodic on $[0,1]$. A limit cycle of~\eqref{perturbed1} on $[0,1]$ is therefore an isolated periodic solution on $[0,1]$.
The number of limit cycles in a compact set is bounded by the number of zeros of the so called \emph{bifurcation function}, which we define below. A limit cycle which remains bounded when $\varepsilon \to 0$, tends to a periodic solution of the non-perturbed system. If the non-perturbed system ($\varepsilon = 0$) has a periodic solution, then necessarily $A(0)=A(1)$, which already implies that it has a center. For this reason we assume from now on that $A(0)=A(1)=0$, so that
\begin{equation*}
\dy = a(x) y^2 \dx \Longleftrightarrow \dH = 0
\end{equation*}
has a center along $0\leq x \leq 1$. The perturbed equation can be written
\begin{equation}\label{perturbed}
\dH - \varepsilon \omega_1 - \varepsilon^2\omega_2 - \dots = 0.
\end{equation}
For a solution $y(x)$, let $\mathcal P_\varepsilon$ be the first return map which sends the initial condition $y_0=y(0)$ to $y_1=y(1)$. We parameterise $\mathcal P_\varepsilon$ by $h=\frac1y = H(0,y)=H(1,y)$ and note that $\mathcal P_\varepsilon$ is analytic both in $h$ and $\varepsilon$ (close to zero). We have therefore for the first return map
%retour à la ligne en trop qui provoque un espace vertical en trop (cf article précédent)
\begin{equation}\label{return1}
\mathcal P_\varepsilon (h)- h= \varepsilon^k M_k(h) + O(\varepsilon^{k+1}), \quad M_k \neq 0.
\end{equation}
The function $M_k$ is the \emph{bifurcation function} associated with the equation~\eqref{perturbed1}. It is also known as the ``first non-zero Melnikov function''. The reader may compare this to~\eqref{first2} which is another representation of the first return map, defined for small $y$. As we shall see, the bifurcation function is globally defined. Therefore for every compact set $K$, $[0,1] \subset K \subset \R^2$ and all sufficiently small $|\varepsilon|$, the number of the limit cycles of~\eqref{perturbed1} in $K$ is bounded by the number of the zeros of the bifurcation function $M_k$ (counted with multiplicity).
$M_k$ allows an integral representation
\[
M_k(h) = \int_{\{H=h\}} \Omega_k
\]
where the integration is along the level set
\[
\{H=h\} = \{(x,y) : 1/y + A(x)= h, 0\leq x \leq 1 \}.
\]
The differential form $\Omega_k$ is computed by the classical Françoise's recursion formula~\cite{fran96,ilie98a,rous98} as follows:
\begin{quotation}
If $k=1$ then $\Omega_1 = \omega_1$, otherwise
\begin{equation}
\Omega_m = \omega_m + \sum_{i+j=m} r_i \omega_j, \quad 2 \leq m \leq k
\end{equation}
and the functions $r_i$, $1\leq i \leq k-1$ are determined successively from the identities $\Omega_i = \dd R_i + r_i \dH$.
\end{quotation}
Note that neither $r_i$ nor $R_i$ are uniquely defined. The integrals $M_i(h)$ are, however, defined unambiguously.
The first order Melnikov function $M_1$ was computed first by Lins Neto~\cite[Section~3]{lins80}, see also~\cite{bfy00}. We have
\begin{align*}
M_1(h) &= \int_{\{H=h\}} \omega_1 \\ &= \int_{\{H=h\}} p_1(x) \dx + yq_1(x) \dx\\
& = \int_0^1 p_1(x) \dx + \int_0^1 \frac{q_1(x)}{h - A(x)} \dx \\
& = \int_0^1 p_1(x) \dx + \sum_{k=0}^\infty h^{-k-1} \int_0^1 q_1(x) A^k(x)\dx.
\end{align*}
$M_1$ vanishes identically if and only if $\int_0^1 p_1(x) \dx = 0$ and
%%question ici de laisser ou non boxed ?Je la laisse ci dessous
%%\[
%%\boxed{
%%\int_0^1 q_1(x) A^k(x) \dx = 0, \; k= 0,1,2, \dots}
%%\]
\[
\int_0^1 q_1(x) A^k(x) \dx = 0, \quad k= 0,1,2, \dots
\]
which is the content of the famous polynomial moment problem for $q_1$ and $A$, solved in full generality by Pakovich and Muzychuk~\cite{pamu09}, see also~\cite{bfy98,bfy99,bfy00,chri00,yomd03}. If $M_1=0$ by the above formula we get
\[
M_2(h)= \int_{\{H=h\}} r_1 \omega_1 + \int_{\{H=h\}} \omega_2
\]
where $r_1$ is computed from the identity $\omega_1= \dR_1 + r_1 \dH$. As $\domega_1 = \dr_1 \wedge \dH$ then $\dr_1 = \omega_1'= \frac{\domega_1}{\dH}$ is the Gelfand--Leray form of $\omega_1$. From the identity $H(x,y(x,h)) \equiv h$ we have $ \frac{\partial y}{\partial h} = - y^2$ and hence
\[
r_1(x,y)= \int_0^x \omega_1' = - \int_0^x y^2 q_1(x) \dx.
\]
We conclude
\begin{prop}[{\cite[Formula~(2.8)]{gavr05}}]\label{prop3.1}
Under the hypothesis $M_1=0$ the second Melnikov function is given by the following iterated integral of length two
\begin{equation}
M_2(h) = \int_{\{H=h\}} \omega_1 \omega_1' + \int_{\{H=h\}} \omega_2
\end{equation}
where
\[
\omega_1= p_1(x) \dx + yq_1(x) \dx,\quad \omega_1'= -y^2 q_1(x) \dx,\quad \omega_2= p_2(x) \dx + yq_2(x) \dx.
\]
\end{prop}
The hypothesis $M_1=0$ is of interest for us, as it will allow to compute the tangent space to the center set at the point $(a,0)$, see the next Section~\ref{section4}. For our purposes the polynomial $a(x)$ can be taken in a general position, in which case the polynomial moment problem for $q(x),A(x)$ has the following elegant solution
\begin{theo}[\cite{chri00}]\label{ccth}
Assume that $A,q$ are complex univariate polynomials, such that
\[
A(0)=A(1)=0,\quad A'(0) \neq 0,\quad A'(1) \neq 0.
\]
The multivalued transcendental function
\begin{equation}\label{abelian}
I(h) = \int_0^1 \frac{q(x)}{h - A(x)} \dx
\end{equation}
vanishes identically, if and only if the polynomials $Q = \int q$ and $A$ satisfy the following ``Polynomial Composition Condition'' (PCC):
%% %%A: je rentre un env de type quotation pour l'intitule de type center, cite ci dessous:
%%\begin{center}
%%There exist polynomials $\tilde Q, \tilde A, W$, such that
%%\[
%A= \tilde A \circ W, Q= \tilde Q \circ W, W(0)=W(1).
%%\]
%%\end{center}
\begin{quotation}
\emph{There exist polynomials $\tilde Q, \tilde A, W$, such that}
\[
A= \tilde A \circ W,\quad Q= \tilde Q \circ W,\quad W(0)=W(1).
\]
\end{quotation}
\end{theo}
Before recalling the proof of Christopher, we put $I$ in the broader context of the Picard--Lefschetz theory.
The function $I(h)$ is well defined for sufficiently big $h$, and has an analytic continuation in a complex domain to a certain multivalued function. It is in fact an Abelian integral depending on a parameter. More precisely, consider the genus zero affine~curve
\[
\Gamma_h= \left\{(x,y)\in \Cbb^2: \frac 1y +A(x) = h \right\}, \quad A(0)=A(1)=0.
\]
It is a Riemann sphere with $n+2$ removed points, provided that $h \neq 0$. The removed points correspond to $(x=x_i(h), y=\infty)$, where $A(x_i(h))\equiv h$, and to $(x=\infty,y=0)$. Given a divisor $m=P_0+P_1$ on $\Gamma_h$, where
\[
P_0 = \left(0,\frac1h\right), \quad P_1=\left(1,\frac1h\right)
\]
we define a singularized algebraic curve $\Gamma_h^{sing}$, see Figure~\ref{fig2}.
\begin{figure}
\includegraphics[scale=0.5]{figures/figure2.jpg}
%%\includegraphics[width=8cm]{figure2.jpg}
\caption{The singularized algebraic curve $\Gamma_h^{sing}$.}\label{fig2}
\end{figure}%%\ \\*[-1.5em]
As a topological space it is just the curve $\Gamma_h$ with the two points $P_0$ and $P_1$ identified to a point $m$. The structural sheaf of $\Gamma_h^{sing}$ is the same as the structural sheaf of $\Gamma_h$, except at the point $m\in \Gamma_h^{sing}$. A function $f$ on $ \Gamma_h^{sing}$ is said to be regular, if it is regular on $\Gamma_h$, and moreover $f(P_0)=f(P_1)$. The path $[0,1]$ connecting the points $x=0$ and $x=1$ closes on the singular algebraic curve $\Gamma_h^{sing}$. The function $I(h)$ defined in~\eqref{abelian} is an Abelian integral on $\Gamma_h^{sing}$
\[
I(h) = \int_{\delta(h)} y q(x) \dx, \quad y = \frac{1}{h-A(x)}
\]
where $\delta(h) \in H_1(\Gamma_h^{sing},\Z)$ is represented by the closed loop on $\Gamma_h^{sing}$ corresponding to the interval $[0,1]$.
We note that given an arbitrary effective divisor $m=P_0+P_1+ \dots P_k$ on $\Gamma_h$, one constructs in a similar way a singularized curve $\Gamma_h^{sing}$, which is the natural framework of the generalized center problem for the Abel equation, see~\cite[Conjecture~1.7]{bfy99} and~\cite{bry10}.
\begin{proof}[Proof of Theorem~\ref{ccth}]
The homology group $H_1(\Gamma_h^{sing}, \Z)$ is of dimension $n+2$. It is generated by $n+1$ simple closed loops $\gamma_i= \gamma_i(h)$ which make one turn around the $n+1$ punctures $x_i(h)$ on $\Gamma_h$, $A(x_i(h))-h=0$, as well as the loop $\delta(h)$ connecting $0$ and $1$ on the singularized curve $ \Gamma_h^{sing}$. The monodromy of the loop $\delta(h)$ is shown in Figure~\ref{fig1}.
\begin{figure}[!h]
\includegraphics[scale=0.5]{figures/figure1.pdf}
%%\includegraphics[width=12cm]{figure1}
\caption{The monodromy of $x_i(h), x_j(h)$ and the loop $\delta(h)$. }\label{fig1}
\end{figure}
As the integral $I(h)$ is constant, it follows that
\[
\int_{\gamma_i(h) - \gamma_j(h)} yq(x) \dx \equiv 0
\]
where $A(x_i(0))=A(x_j(0)) = 0$ and $ x_i(0) = 0$, $x_j(0) = 1$. We have also
\begin{align*}
\int_{\gamma_i(h) - \gamma_j(h)} yq(x) \dx &= \int_{\gamma_i(h) - \gamma_j(h)} \frac{q(x)}{h-A(x)} \dx \\
&= -2\pi i \left(\frac{q(x_i(h))}{A'(x_i(h))} - \frac{q(x_j(h))}{A'(x_j(h))}\right)\\
&= -2\pi i \big[q(x_i(h))x_i'(h) - q(x_j(h)) x_j'(h)\big] \\
&= -2\pi i \frac{\dd}{\ddh} \big[Q(x_i(h)) - Q(x_j(h))\big]\\
\intertext{where}
Q(x) &= \int q(x)\dx
\end{align*}
is a primitive of $q$, and $x_i(h)$ are the roots of the polynomial $A(x)-h$ (we used that $A'(x_i(h)) x_i'(h) \equiv 1$). We denote,
\begin{equation}
J(h) = \int_{x_i(h)-x_j(h)} Q = Q(x_i(h)) - Q(x_j(h))
\end{equation}
and call $J$ an Abelian integral of dimension zero along the zero-cycle
\[
x_i(h)-x_j(h) \in H_0(\{A(x)=h \}, \Z)
\]
(\cite[Definition~1]{gamo07}). If the Abelian integral $I(h)$ vanishes identically, then the same holds true for $J'(h)$, hence $J(h)= \const$ and it is easy to check that the constant is zero, $J(h) \equiv 0$.
The set of rational functions $Q$ such that $ Q(x_i(h)) \equiv Q(x_j(h))$ is a subfield of the field of all rational functions $\Cbb(x)$. By the Lüroth theorem this subfield is of the form $\Cbb(W)$ for suitable rational function $W$. It follows that $Q= \tilde Q \circ W, A = \tilde A \circ W$ where $\tilde Q, \tilde A, W$ are rational functions. As $Q$ is a polynomial, then $Q^{-1}(\infty)= \{ \infty \}$ which implies that $\tilde Q^{-1}(\infty) = \{p\}$ for some $p\in \Pbb^1$ and also $W^{-1}(p) = \{\infty\}$, and similarly $\tilde A^{-1}(\infty) = \{p\}$. Let $\varphi$ be a Möbius function such that $\varphi(p)=\infty$. Then the functions
\[
\tilde Q \circ \varphi^{-1}, \varphi \circ W, \tilde A \circ \varphi^{-1}
\]
are polynomials. For this reason we may suppose that $\tilde Q, \tilde A, W$ are polynomials. If $W(x_i(h))\equiv W(x_j(h)) $, then clearly $W(0) = W(1)$ and the Theorem~\ref{ccth} is proved. If $W(x_i(h))\not\equiv W(x_j(h)) $, then we denote $ w_i(h) = W(x_i(h)), w_j(h) = W(x_j(h)) $ and note that
\[
\tilde A(w_i(h)) \equiv \tilde A(w_j(h)) \equiv h, \quad \tilde Q(w_i(h)) = \tilde Q(w_j(h)).
\]
We may reason then by induction on $\tilde A, \tilde Q$ which have a smaller degree than $A, Q$ respectively. Thus this process must stop and we get $W$ with $W(x_i(h))\equiv W(x_j(h)) $, and hence $W(0)=W(1)$.
\end{proof}
\section{Irreducible components of the Center set}\label{section4}
An affine algebraic variety $V$ in $\Cbb^n$ is the common zero locus of a finite collection of polynomials $f_i \in \Cbb[z_1,\dots,z_n]$. The variety $V$ is said to be irreducible, if for any pair of closed varieties $V_1,V_2$ such that $V= V_1\cup V_2$, either $V_1=V$ or $V_2=V$. Of course, it might happen that a variety $V$ is reducible $V= V_1\cup V_2$, where $V_1,V_2\neq V$. In this case we may ask whether $V_1$ and $V_2$ are further reducible and so on. It is a basic fact of commutative algebra that in this way only a finitely many irreducible subvarieties $V_i\subset V$ can be found, and more precisely:
\begin{quote}
\emph{Any variety $V$ can be uniquely expressed as a finite union of irreducible varieties $V_i$ with $V_i \subsetneqq V_j$ for $i\neq j$, e.g.~\cite{harr95}}.
\end{quote}
The varieties $V_i$ which appear in the finite decomposition
\[
V = \cup_i V_i
\]
are the \emph{irreducible components} of $V$.
Let $W \subset V$ be another algebraic variety. Is $W$ an irreducible component of $V$? It is usually easy to verify, whether $W$ is irreducible. It is much harder to check that $W$ is an irreducible component of $V$. Indeed, it might happen that $W \subsetneqq V_i$ where $V_i$ is an irreducible component of $V$. To verify this, one may compare the dimensions of the tangent spaces $T_x W$ and $T_x V$ at some smooth point $x\in V\cap W$ (one point $x$ is enough!). Then \emph{$W \subsetneqq V_i$ if and only if $T_x W \subsetneqq T_x V$}. Of course, there might be no way to know that $x$ is a smooth point, in which case we use the tangent cones $TC_x W$ and $TC_x V$. For every $x\in W$ on an irreducible variety $W$ holds $\dim TC_x W = \dim W$. Thus, for irreducible varieties $W \subset V$ holds
\[
\dim TC_x W < \dim TC_x V \Longleftrightarrow W \subsetneqq V.
\]
The choice of $x \in W$ is irrelevant, which allows a great flexibility.
The above observation will be applied in the case when $V$ is the center set of the equation~\eqref{ode}, and $W$ is a subset of equations with a center. In the planar case~\eqref{system} this approach was developed by Movasati~\cite{mova04}. He observed that the vanishing of the first Melnikov function, related to one-parameter deformations (arcs) of systems~\eqref{system} with a center, provides equations for the tangent space $T_x W$, while the vanishing of the second Melnikov function provides equations for the tangent cone $TC_x W$. This remarkable connection between algebraic geometry and dynamics will allow us to go farther in the description of irreducible components of the center set. We adapt the approach of Movasati~\cite{mova04} and Zare~\cite{zare19} to~\eqref{ode} in the context of the set $\mathcal A_n$ of Abel differential equations
\begin{align}\label{abel2}
\frac{\dy}{\dx} = a(x) y^2+ b(x)y^3
\end{align}
parameterised by the polynomials $a(x), b(x)$ of degree at most $n$. They form therefore a vector space of dimension~$2n+2$. Consider the subset $\mathcal C_n\subset \mathcal A_n$ of Abel differential equations having a center on the interval $0\leq x\leq 1$. As we saw in the preceding section, $\mathcal C_n$ is defined by finitely many polynomial relations $c_n(a,b) = 0$ and therefore is an algebraic set.
\subsection{Universal centers of the Abel equation define irreducible components of the center set}\label{section41}
If the integer $k>1$ divides $n+1$, then we denote by $\mathcal U_{n/k}\subset \mathcal C_n\subset \mathcal A_n$ the algebraic closure of the set of pairs of polynomials $(a,b)$ (or Abel equations~\eqref{abel2}), such that the following Polynomial Composition Condition (PCC) is satisfied
\emph{There exist polynomials $\tilde A, \tilde B, W$ of degrees $(n+1)/k$, $(n+1)/k$, $k$, such that}
\begin{equation}\label{PCC}
\tag{PCC} A= \tilde A \circ W,\quad B= \tilde B \circ W,\quad W(0)=W(1).
\end{equation}
The differential form associated to~\eqref{abel2}
\[
\dy - \left(a(x) y^2+ b(x)y^3\right) \dx = \dy - y^2 \dA(x) - y^3\dB(x)
\]
is a pull back of the differential form
\begin{equation}\label{pback}
\dy- \left(\tilde{A}'(w) y^2 + \tilde{B}'(w) y^3\right) \dw = \dy - y^2 \dd \tilde{A}(w) - y^3 \dd \tilde{B}(w)
\end{equation}
under the map $(x,y) \to (w,y)$, where $w=W(x)$. In other words the equation~\eqref{abel2} is obtained from
\[
\frac{\dy}{\dw} = \tilde{A}'(w) y^2 + \tilde{B}'(w) y^3
\]
via the substitution $w=W(x)$. This, combined with $W(0)=W(1)$, implies that the Abel equations in $\mathcal U_{n/k}$ have a center at $y=0$ along $[0,1]$. Of course one could check directly that the center conditions $c_n(a)=0$ are satisfied for all $n$ (Theorem~\ref{brf}). Indeed, the iterated integrals $
\int_{x_0}^{x_1} a_{i_1} \cdots a_{i_k} $ vanish, because they are pull backs under $W$ of iterated integrals along an interval, contractible to the point $W(x_0)=W(x_1)$. Following Brudnyi~\cite{brud06}, we say that~\eqref{abel2} determines a \emph{universal center} if and only if
\[
\int_{x_0}^{x_1} a_{i_1} \cdots a_{i_k} = 0, \quad \forall i_j \in \N.
\]
It is shown then that a center is universal, if and only if the corresponding equation~\eqref{abel2} is a pull back under an appropriate polynomial as above, see Brudnyi~\cite[Corollary~1.20]{brud06}. Thus, the universal centers are exactly those, obtained by a polynomial pull back in the sense~\eqref{pback}, see the Polynomial Composition Condition~\eqref{PCC}.
Note that the universal center set $\mathcal U_{n/k}$ is an irreducible algebraic variety, as a Zariski open subset of it is parametrized by the polynomials $\tilde A, \tilde B, W$ of degrees respectively $(n+1)/k, (n+1)/k, k$. The main result of the section is
\begin{theo}\label{pullback}
The algebraic sets $\mathcal U_{n/k}$ are irreducible components of the center set $\mathcal C_n$ of the Abel equation
\[
\frac{\dy}{\dx} = a(x) y^2+ b(x)y^3,\quad \deg a,\, \deg b \leq n.
\]
\end{theo}
We shall illustrate first the idea of the proof of Theorem~\ref{pullback} on the rather elementary case $k=n+1$. The closure of the universal center set $ \mathcal U_{n/n+1} $ consists of Abel equations~\eqref{abel2} such that
\[
\deg a,\, \deg b \leq n, \quad \int_0^1 a(x)\dx = \int_0^1 b(x)\dx = 0
\]
and moreover the polynomials $a(x), b(x)$ are co-linear. Thus, $ \mathcal U_{n/n+1} $ is identified with the vector space of pairs of polynomials $(a(x),b(x))$ with the above properties, and is therefore of dimension $n+1$. Consider now the point $(a(x),0) \in \mathcal U_{n/n+1} $ where $a(x)$ is a degree $n$ polynomial.
\begin{prop}
The tangent space $T_{(a,0)} \mathcal U_{n/n+1} $ is a vector space of dimension $n+1$, which consists of pairs of polynomials $(p,q)$ of degree at most $n$, such that $q$ and $a$ are co-linear polynomials, and $\int_0^1 p(x)\dx = 0$.
\end{prop}
The proof is left to the reader. Next, we compute the tangent cone $TC_{(a,0)} \mathcal C_n $ at $(a,0)$ to the center set $\mathcal C_n$. To avoid complications, we choose $a$ to be a non-composite polynomial.
\begin{prop}\label{tangentc1}
Let $a$ be a non-composite polynomial of degree $n$, such that $a(0)\neq 0$, $a(1)\neq 0$. Then
\[
TC_{(a,0)} \mathcal C_n = T_{(a,0)} \mathcal U_{n/n+1}.
\]
\end{prop}
The above implies that \emph{the algebraic set $\mathcal U_{n/n+1} $ is an irreducible component of the center set $\mathcal C_n$.}
\begin{proof}[Proof of Proposition~\ref{tangentc1}] Consider a one-parameter deformation
\begin{align}
\varepsilon \to (a- \varepsilon p + \dots, - \varepsilon q + \dots)
\end{align}
of~\eqref{abel2} at the point $(a,0)$. For $\varepsilon=0$ the equation is
\[
\frac{\dy}{y^2} = a(x) \dx
\]
and has a first integral $H(x,y)= \frac1y + A(x)$ where $A$ is a primitive of $a$, $A(0)=A(1)$. The perturbed equation is
\[
\dH - \varepsilon [p(x) + yq(x)] \dx + \dots = 0.
\]
We parameterize the cross-sections $\{x=0\}$, $\{ x= 1 \}$ by $h= H(0,y)=H(1,y)=1/y $ and write for the return map $\varphi_\varepsilon$
\[
\varphi_\varepsilon (h) = h + \varepsilon M_1(h) + O(\varepsilon^2).
\]
The Melnikov function $M_1$, according to Section~\ref{section3}, is computed to be
\[
M_1(h) = \int_{\{H=h\}} p(x) \dx + yq(x) \dx = \int_0^1 p(x) \dx + \int_0^1 \frac{q(x)}{h - A(x)} \dx.
\]
Assuming that for all sufficiently small $\varepsilon$ the deformed Abel equation belongs to the center set $\mathcal C_n$, implies $M_1=0$, which in turn imposes rather severe conditions on the polynomials $p, q$. First, $\int_0^1 p(x) \dx = 0$ as follows already from~\eqref{nc}. The second~condition
\[
\int_0^1 \frac{q(x)}{h - A(x)} \dx \equiv 0
\]
is well studied in a number of articles, and is known as the \emph{polynomial moment problem}, e.g.~\cite{bry10} and the references there. For the case of a general $A$, see the Addendum by Pakovich in~\cite{yomd03}. As $a(0)\neq 0$, $a(1)\neq 0$, then by Theorem~\ref{ccth} we have that $\int_0^1 \frac{q(x)}{h - A(x)} \dx \equiv 0$ if and only if the composition condition holds true. As $A$ is supposed to be prime, this means that $A$ and $Q=\int q$ are co-linear polynomials. This completes the proof of Proposition~\ref{tangentc1} and Theorem~\ref{pullback} in the case $k=n+1$.
\end{proof}
Note that in full generality, a vector $(p,q) $ which belongs to the tangent cone is a vector, such that there is a one-parameter deformation
\[
\varepsilon \to (a+ \varepsilon^k p + \dots, \varepsilon^k q + \dots)
\]
at the point $(a,0)$ which belongs to the center set $\mathcal C_n$. The same arguments give the same constraints to the vector $(p,q)$.
\begin{proof}[Proof of Theorem~\ref{pullback} in the general case]
Assume that the integer $k>1$ divides $n+1$ and consider the algebraic set $\mathcal U_{n/k}$ of Abel differential equations having a center at $y=0$ along $[0,1]$. The proof follows the same lines as the case $k=n+1$, with the notable difference that the second Melnikov function $M_2$ will be needed.
We compute first the tangent space to $\mathcal U_{n/k}$ at a general point $(a,0)$. Consider for this purpose the one-parameter deformation
\begin{equation}\label{abelfoliationeps}
\mathcal F_\varepsilon : \frac{\dy}{y^2} = a(x) \dx - \varepsilon \omega_1 - \varepsilon^2 \omega_2 - \dots
\end{equation}
where
\[
\omega_i= p_i(x) \dx + y q_i(x) \dx
\]
are polynomial one-forms, $\deg p_i \leq n$, $\deg q_i \leq n$. As before we denote
\[
A = \int a,\quad P_i = \int p_i,\quad Q_i = \int q_i
\]
where
\[
A(x)= \tilde{A}(W(x)),\quad W(0)=W(1),\quad P_i(0)=P_i(1),\quad Q_i(0)=Q_i(1).
\]
The point $(a,0)$ belongs to $\mathcal U_{n/k} $ if and only if $A= \tilde{A} \circ W$ for some degree $k$ polynomial $W$.
\goodbreak
\begin{prop}\label{cone}
The tangent space $T_{(a,0) }\mathcal U_{n/k} $ is the vector space of polynomials $(p_1,q_1)$ such that
\[
P_1(x) = \tilde P_1 \circ W(x) + R(x)\, \tilde A' (W(x)), \quad Q_1(x) = \tilde Q_1 (W(x))
\]
where $\tilde P_1, \tilde Q_1 $ are arbitrary polynomials of degree at most $(n+1)/k$ and $R= R(x)$ is any degree $k$ polynomial, such that $R(0)=R(1)$.
\end{prop}
The proof is straightforward, it suffices to consider the first order approximation in $\varepsilon$ of the general deformation
\[
\omega_1^\varepsilon = \dd\big[(\tilde{A} + \varepsilon \tilde P) \circ (W+ \varepsilon R)(x)\big] + \varepsilon y \dd \big[\tilde Q \circ (W+ \varepsilon R) (x)\big]
\]
of $\omega_1^0= a \dx.$
Next, we study the tangent cone $TC_{(a,0)} \mathcal C_n$. We need to compare the affine varieties $T_{(a,0) }\mathcal U_{n/k} \subset TC_{(a,0)} \mathcal C_n$.
\begin{prop}\label{prop4.5}
In a sufficiently small neighbourhood of every general point $(p,q)\in T_{(a,0) }\mathcal U_{n/k}$ the tangent cones $ TC_{(a,0)} \mathcal C_n$ and $T_{(a,0) }\mathcal U_{n/k}$ coincide.
\end{prop}
The above Proposition~\ref{prop4.5} shows that there is no irreducible component of $ TC_{(a,0)} \mathcal C_n$ which contains an irreducible component of $T_{(a,0) }\mathcal U_{n/k}$ of strictly smaller dimension. This would imply Theorem~\ref{pullback}.
The first Melnikov function, as in the case $k=n+1$, is $M_1= \int_0^1 p_1\dx + yq_1 \dx$. By Christopher's theorem $M_1=0$ implies that $q_1$ satisfies the composition condition
\[
Q_1(x)= \tilde Q_1 (W(x)).
\]
Additional obstructions on the form of $p_1$ will be found by inspecting the second Melnikov function $M_2$. Under the condition that $M_1=0$ we find~\cite[Formula~(2.8)]{gavr05}
\[
M_2 = \int_0^1 \omega_1 \omega'_1 + \int_0^1 \omega_2
\]
where the derivative $'$ is with respect to the parameter $h$. The identity $h= A(x)+ \frac1y$ shows that $y'=-y^2$ and $\omega_1'= - y^2 q_1 \dx$, it is clearly a covariant derivative in a cohomology bundle (although we do not need this interpretation here). Therefore, for the iterated integral of length two we find
\begin{align*}
\int_0^1 \omega_1 \omega_1' & = - \int_{\{H=h\}} (p_1 \dx+q_1 y \dx)(y^2 q_1 \dx) \\
& = - \int_{\{H=h\}} (p_1 \dx)(y^2 q_1 \dx) \\
&= \int_{\{H=h\}} y^2 q_1 P_1 \dx
\end{align*}
where $P_1$ is a primitive of $p_1$. Indeed, $M_1=0$ implies the composition condition for $Q_1=\int q_1$ and $A$, that is to say the integral $\int_{\{H=h\}} y q_1 \dx$ vanishes as a pull back. The same then holds true for its derivative $\int_{\{H=h\}} y^2 q_1 \dx$ as well for the iterated integral $\int_{\{H=h\}} (y q_1 \dx)(y^2 q_1 \dx) $. Further, by the shuffle relation for iterated integrals
\[
\int_{\{H=h\}} (p_1 \dx)(y^2 q_1 \dx) + \int_{\{H=h\}} (y^2 q_1 \dx) (p_1 \dx) = \int_{\{H=h\}} p_1 \dx \int_{\{H=h\}} y^2 q_1 \dx = 0.
\]
Further, for $\int_0^1 \omega_2$ we find
\begin{align*}
\int_0^1 \omega_2 = \int_0^1 (p_2+ y q_2) \dx &= \int_0^1 \frac{\dQ_2}{h-A(x)} \\
& = - \int_0^1 \frac{Q_2 \dA}{(h-A)^2} + \frac{Q_2}{h-A} \Big|_0^1 \\
& = -\int_0^1 y^2 Q_2 a \dx,
\end{align*}
so that under the condition $M_1=0$ implies
\[
M_2(h)= \int_{\{H=h\}} y^2 q_1 P_1 \dx - y^2 Q_2 a \dx = \int_{0}^1 \frac{q_1(x) P_1(x) - Q_2(x) a(x)}{(h-A(x))^2} \dx.
\]
We apply Christopher's theorem to $M_2$ and conclude that the primitive of the polynomial $q_1(x) P_1(x) - Q_2(x) a(x)$ is a composite polynomial, it can be expressed as a polynomial function in $W(x)$, and therefore
\[
q_1(x) P_1(x) - Q_2(x) a(x) = P(W(x)) W'(x)
\]
or equivalently
\[
\tilde{Q}_1'(W(x)) P_1(x) - Q_2(x) \tilde{A}'(W(x)) = R_1(W(x))
\]
for certain polynomial $R_1$. Assuming that $\tilde{Q}_1'$ and $\tilde{A}'$ are mutually prime, there exist polynomials $R_2, R_3$ such that
\[
\tilde{Q}_1'(W) R_2(W) - \tilde{A}'(W) R_3(W) = R_1(W)
\]
so
\[
\tilde{Q}_1'(W(x)) (P_1(x) -R_2(W(x)))- (Q_2(x)- R_3(W(x))) \tilde{A}'(W(x)) = 0.
\]
This implies finally that $ \tilde{A}'(W(x))$ divides $P_1(x) -R_2(W(x))$ and
\[
P_1(x) = R_2(W(x)) + R(x) \tilde{A}'(W(x)).
\]
Proposition~\ref{cone}, and hence Theorem~\ref{pullback} is proved.
\end{proof}
\subsection{The center set of plane quadratic vector fields}\label{section42}
Let $ \mathcal A_n$ be here the set of all polynomial vector fields of degree at most $n$. The only (non-trivial) case in which the center set $\mathcal C_n \subset \mathcal A_n$ is completely known is the quadratic one, $n=2$. For comprehensive description and historical comments concerning the center-focus problem in the quadratic case see Zoladek~\cite{zola94}. To the plane quadratic vector field~\eqref{system} we associate a foliation $\mathcal F_\omega = \{\omega = 0\} $ on $\Cbb^2$, defined by the polynomial one-form
\[
\omega = P(x,y) \dy - Q(x,y) \dx.
\]
The leaves of the foliation are the orbits of the plane vector field~\eqref{system}, and the restriction of the one-form $\omega$ on the leaves of $\mathcal F_\omega$ vanishes identically.
In this section we assume that the polynomials $P,Q$ are of degree at most two, and the system has a center. As the foliation is over $\Cbb$ we must be more careful in the definition. We shall say that a singular point is a center, if the point is non-degenerate, and has a local holomorphic first integral with a Morse critical point. Thus, in a neighbourhood of such a point, and up to a complex affine change of the variables, the system can be written in the form
\begin{equation}
\dot{x} = x + P_2(x,y), \quad
\dot{y} = - y + Q_2(x,y)
\end{equation}
for some homogeneous polynomials $P_2, Q_2$. The following classical result is implicit in Zoladek~\cite[Theorem~1]{zola94}
\begin{theo}\label{th6}
The center set $\mathcal C_2$ of plane polynomial quadratic systems with a Morse center has four irreducible components.
\end{theo}
The above claim is a modern interpretation of the Dulac's classification~\cite{dul08} of such Morse centers in a complex domain, see Lins Neto~\cite[Theorem~1.1]{lins14}. Indeed, it is easier to decide that a given variety is irreducible, than to decide that it is an irreducible component of some algebraic set. Sketch of the proof of Theorem~\ref{th6} can be found in~\cite[Appendix]{gavr16}.
To describe explicitly the four components of the center variety $\mathcal C_2$, recall that the foliation $\mathcal F_\omega$, respectively the vector field~\eqref{system}, is said to be logarithmic, if
\begin{equation}\label{log}
P(x,y) \dy - Q(x,y) \dx = f_1 \dots f_k \sum_{i=1}^k \lambda_i \frac{\df_i}{f_i},\quad f_i \in K[x,y],\quad \lambda_i \in K
\end{equation}
for suitable polynomials $f_i$ and exponents $\lambda_i$. As
\[
\sum_{i=1}^k \lambda_i \frac{\df_i}{f_i} = \dd \log \prod_{i=1}^k f_i^{\lambda_i}
\]
then the logarithmic foliation $\mathcal F_\omega$ has a first integral of Darboux type
\[
\prod_{i=1}^k f_i^{\lambda_i}.
\]
Let $\mathcal L (d_1,d_2, \dots,d_k)$ denote the set of such logarithmic foliations (or plane vector fields) with
\[
\deg f_1 \leq d_1, \deg f_2 \leq d_2, \dots, \deg f_k \leq d_k.
\]
For generic polynomials $f_i$ of degree $d_i$ the degree of the associated vector field is $\sum d_i-1$. Therefore $\mathcal L (d_1,d_2, \dots,d_k)$ is quadratic, provided that $d_1=3$ or $d_1=1$, $d_2=2$ or $d_1=d_2=d_3=1$. This defines three large irreducible components of the center set $\mathcal C_2$ of quadratic systems with a Morse center, $\mathcal L(3), \mathcal L(1,2), \mathcal L(1,1,1)$ respectively. We have, however, one more irreducible component of $\mathcal C_2$ which is
\[
\mathcal Q_4 = \mathcal L(2,3) \cap \mathcal A_2.
\]
Here $\mathcal L(2,3)$ is the set of polynomial foliations as above, with a first integral $f_2^3/f_3^2$ where $\deg f_2 = 2, \deg f_3 = 3$. Generically such a foliation is of degree four, but it happens that its intersection $\mathcal Q_4$ with the space $\mathcal A_2$ of quadratic foliations is non empty and it is an irreducible algebraic set. The notation $\mathcal Q_4$ is introduced by Zoladek~\cite{zola94}, the index $4$ indicates the co-dimension of the set in the space of quadratic vector fields $\mathcal A_2$.
The exceptional set $\mathcal Q_4$ might look not quite explicit; we investigate it in detail below.
The space $\mathcal A_n$ of polynomial vector fields of degree at most $n$ is identified with a vector space of dimension $(n+1)(n+2)$. On $\mathcal A_n$ acts the affine group $\Aff_2$ of affine transformations of $K^2$ (as usual $K=\R$ or $K=\Cbb$), as well as the multiplicative group $K^*$ corresponding to ``change of time'', $\dim \Aff_2 \times K^* = 7$. Therefore the dimension of the orbit of a general polynomial vector field is $7$. For this reason it is expected that the minimal dimension of a component of the center set $\mathcal C_n$ is also $7$. Such components, if they exist, will be in a sense exceptional.
In the quadratic case $n=2$ the dimensions of the four components of $\mathcal C_2$ are easily found. For instance, in the case $\mathcal L(1,1,1)\subset \mathcal A_2$, and up to affine changes of variables and time, one may suppose that the first integral is in the form $xy^\lambda (1-x-y)^\mu$. Therefore the dimension of $\mathcal{L}(1,1,1)$ is $2+7=9$ and the codimension is $3=12-9$. We find similarly that $\dim \mathcal L(1,2) = \dim \mathcal L(3) = 9$.
We describe now the last component $\mathcal Q_4$. Let $[x:y:z] $ be homogeneous coordinates in $\Pbb^2$ and let
\begin{align}\label{pol2}
P_2(x,y,z)&= a_2(x,y) + a_1(x,y) z + a_0(x,y) z^2\\
P_3(x,y,z)& = b_3(x,y) +b_2(x,y) z + b_1(x,y)z^2 + b_0(x,y) z^3\label{pol3}
\end{align}
be homogeneous polynomials in $x,y,z$ of degrees~$2$ and~$3$ respectively. The function
\[
H=P_2^3/P_3^2
\]
is therefore rational on $\Pbb^2$ and induces a foliation on $\Pbb^2$
\begin{equation}\label{p2}
3 P_3(x,y,z) \dP_2(x,y,z) -2 P_2(x,y,z) \dP_3(x,y,z) =0.
\end{equation}
The corresponding affine foliation on the chart $\Cbb^2$ defined by $z=1$
\begin{equation}\label{aff2}
3 P_3(x,y,1) \dP_2(x,y,1) -2 P_2(x,y,1) \dP_3(x,y,1) =0
\end{equation}
is of degree $ 4$. We may obtain a plane polynomial foliation of degree $ 2$ by imposing the following additional conditions.
Suppose first, that the infinite line $\{z=0\}$ is invariant, that is to say (up to affine change)
\begin{equation}
H(x:y:1)= \frac{a_2(x,y)^3}{ b_3(x,y)^2} = 1.
\end{equation}
This condition can be written as
\[
P_2(x,y,z)^3=P_3(x,y,z)^2 + O(z).
\]
The foliation~\eqref{p2} takes the form
\[
z\big[P(x,y,z)\dx + Q(x,y,z)\dy\big] + R(x,y,z) \dz = 0
\]
where $\deg P, \deg Q \leq 3$, so~\eqref{aff2} is of degree $ 3$. If we further suppose that $z$ divides the homogeneous one form $3P_3\dP_2-2P_2\dP_3$ then~\eqref{p2} takes the form
\[
z^2\big[P(x,y,z)\dx + Q(x,y,z)\dy\big] + zR(x,y,z) \dz = 0
\]
where $\deg P, \deg Q \leq 2$, so~\eqref{aff2} is a plane quadratic foliation. The condition that $z^2$ divides $3P_3\dP_2-2P_2\dP_3$ can be written as
%\goodbreak %pas top de couper juste avant une équation
\[
P_2(x,y,z)^3=P_3(x,y,z)^2 + O(z^2)
\]
or equivalently
\begin{align}
a_2(x,y)^3 &= b_3(x,y)^2 \label{eqn1}\\
3 a_2(x,y)^2 a_1(x,y) &= 2 b_3(x,y) b_2(x,y).\label{eqn2}
\end{align}
These polynomial relations can be further simplified by affine changes of the variables $x,y$. First, \eqref{eqn1} implies that $a_2$ is a square of a linear function in $x,y$ which we may suppose equal to $x$, that is to say
\[
a_2(x,y)= x^2, \; b_3(x,y) = x^3.
\]
The second condition~\eqref{eqn2} becomes $3x a_1= 2 b_2$ where we may put $a_1=2 y$, and hence
\[
a_1(x,y) = 2y,\; b_2(x,y) = 3 xy.
\]
It is seen that the polynomial $P_3(x,y,1)$ has a real critical point which we can put at the origin, so we shall also suppose that $b_1=0$. Using finally a ``change of time'' (the action of $K^*$) we assume that $b_0=1$ while $a_0= \alpha \in K$ is a free parameter (modulus). The first integral takes therefore the form
\begin{equation}\label{aps2}
H_\alpha(x,y) = \frac{ (x^2 + 2 y + \alpha)^3}{(x^3 + 3xy + 1)^2}
\end{equation}
with induced quadratic foliation
\begin{equation}\label{aps3}
(- \alpha x^{2} - 2 y^{2} - \alpha y + x) \dx + (x y - \alpha x + 1)\dy.
\end{equation}
This is the exceptional co-dimension four component $\mathcal Q_4$.
The reader may check that the corresponding vector field
\[
x'= x y - \alpha x + 1, \quad y' = \alpha x^{2} +2 y^{2} + \alpha y - x
\]
has a Morse center at $x=1/\alpha, y=0$ which is moreover a usual real center for $\alpha \in (-1,0)$. The above computation is suggested by~\cite{lins14} where, however, the modulus $\alpha$ is wrongly fixed equal to $\alpha=\infty$. The foliation on $\Pbb^2$ corresponding to
\[
H_\infty(x,y) = \frac{ (x^2 + 2 y + 1)^3}{(x^3 + 3xy)^2}
\]
has two invariant lines $\{x=0\}$ and $\{z=0\}$, in contrast to the general foliation defined by $\dH_\alpha(x,y) = 0$ which has only one invariant line $\{z=0\}$. We summarize the above as follows
\begin{prop}
Every polynomial vector field having a rational first integral of the form
\[
H(x,y)= \frac{\big(a_0(x,y) + a_1(x,y) + a_2(x,y)\big)^3}{\big(b_0(x,y)+ b_1(x,y) + b_2(x,y) + b_3(x,y)\big)^2}
\]
where the homogeneous polynomials $a_i, b_j$ of degrees $i$ and $j$, $0\leq i \leq 2$, $0\leq j \leq 3$, are subject to the relations
\begin{align*}
a_2(x,y)^3 &= b_3(x,y)^2 \\
3 a_2(x,y)^2 a_1(x,y) &= 2 b_3(x,y) b_2(x,y)
\end{align*}
is of degree two. The set of such quadratic vector fields form the irreducible component $\mathcal Q_4$ of the center set $\mathcal C_2$. Up to an affine change of the variables $x,y$ the polynomial $H$ can be assumed in the form $H(x,y) = \frac{ (x^2 + 2 y + \alpha)^3}{(x^3 + 3xy + 1)^2}$ where $\alpha$ is a parameter.
\end{prop}
We conclude this section with the following remarkable property of $\mathcal Q_4$. One may check that a general rational function of the form $H(x,y)=P_2^3/P_3^2$, where $P_2,P_3$ are bi-variate polynomials of degrees two and three, defines a pencil of genus four curves $\Gamma_t = \{(x,y):H(x,y)=t\}$ on $\Cbb^2$. However, the special rational function $ H_\alpha$~\eqref{aps2} defines an elliptic pencil, that is to say the level sets
\[
\Gamma_t = \left\{(x,y) \in \Cbb^2 : H_\alpha(x,y)= t \right\}
\]
are genus one curves, see~\cite{gail09}.
\subsection{The center set of the polynomial Liénard equation}\label{section4b}
Consider the following polynomial Liénard equation
\begin{equation}\label{lie1}
\dot{x} = y, \quad \dot{y} = -q(x) - y p(x)
\end{equation}
in which the origin $(0,0)$ is an isolated singular point. The following is an obvious necessary and sufficient condition for the equation~\eqref{lie1} to have a \emph{linear} center
\[
q(0)=p(0)=0,\quad q'(0) >0.
\]
The description of the non-degenerate centers of~\eqref{lie1} is due to Cherkas~\cite{cher72} in the real analytic case, and to Christopher~\cite{chri99} in the polynomial case.
Consider the following Polynomial Composition Condition (PCC)
\begin{quotation}
\emph{There exist polynomials $\tilde P, \tilde Q, W$ such that
\begin{equation}\label{eqPCC}
\tag{PCC}
P= \tilde P\circ W, \quad Q= \tilde Q \circ W
\end{equation}
where $P'(x)=p(x), Q'(x)=q(x)$.}
\end{quotation}
The Theorem of Cherkas and Christopher can be formulated as follows
\begin{theo}\label{th2a}
The real polynomial Liénard equation~\eqref{lie1} has a non-dege\-nerate real center at the origin in $\R^2$, if and only if
\[
p(0)=q(0)=0,\quad q'(0)>0
\]
and the polynomials $p, q$ satisfy the above Polynomial Composition Condition, where the real polynomial $W$ has a Morse critical point at the origin.
\end{theo}
The proof of Theorem~\ref{th2a}, see~\cite{chri99,chri07}, is based on the following simple observation due to Cherkas~\cite[Lemma~1]{cher72}
\begin{lemm}
The real analytic equation
\begin{equation}\label{cher}
\dot{x} = y, \quad \dot{y} = -x+ y \sum_{i=1}^\infty a_i x^i
\end{equation}
has a center at the origin, if and only if $a_{2j}= 0$, $\forall j\geq 1$.
\end{lemm}
Indeed, the truncated equation
\begin{equation}\label{truncated}
\dot{x} = y, \quad \dot{y} = -x+ y \sum_{j=0}^\infty a_{2j+1} x^{2j+1}
\end{equation}
is reversible in $x$, and hence it has a center at the origin. In a sufficiently small neighbourhood of the origin, \eqref{truncated} is rotated with respect to the vector field~\eqref{cher}, unless $a_{2j}= 0$, $\forall j\geq 1$. The final argument of Christopher is to use the Lüroth theorem, to deduce the~\eqref{eqPCC} condition. This topological argument of Cherkas does not apply in a complex domain. We shall prove, however, the following more general
\begin{theo}\label{th2}
The (possibly complex) polynomial Liénard equation~\eqref{lie1} has a Morse critical point at the origin in $\Cbb^2$, if and only
if
\[
p(0)=q(0)=0,\quad q'(0)\neq 0
\]
and the polynomials $p, q$ satisfy the above Polynomial Composition Condition, where the (possibly complex) polynomial $W$ has a Morse critical point at the origin.
\end{theo}
Theorem~\ref{th2} implies Theorem~\ref{th2a}. Its meaning is that the origin is a Morse center if and only if the Liénard equation is a ``pull back''. More precisely, the Liénard equation~\eqref{lie1} induces a polynomial foliation
\begin{equation}\label{lie2}
y \dy +(q(x) + y p(x)) \dx = 0,
\end{equation}
which is a pull back of the foliation
\begin{equation}\label{lie2b}
y\dy + \dd\tilde Q(W) + y \dd\tilde P(W) = 0
\end{equation}
under the map $W=W(x)$, $y=y$. We may assume that $\tilde Q(0)=\tilde P(0)=0$ and $\tilde Q'(0)\neq 0$. Under these hypotheses~\eqref{lie2b} has a local first integral of the form
\[
W+ c_1y^2+ c_2 Wy + c_3W^2 + \dots, \quad c_1 \neq 0
\]
which implies that the pull back foliation~\eqref{lie2} has an analytic first integral with a Morse critical point at the origin.
In the remainder of this subsection~\ref{section4b} we prove Theorem~\ref{th2}. Assume that the Liénard equation~\eqref{lie1} has a Morse critical point at the origin. As $q(0)=0, q'(0)\neq 0$ and $p(0) = 0$, then the polynomial $\frac 12 y^2 + Q(x)$ has a Morse critical point at the origin and there exists a local bi-analytic change of the variable $x\to X$ such that $\frac 12 y^2 + Q(x(X))= \frac12 (y^2+ X^2)$. Thus
\begin{equation}
y \dy + (q(x) + y p(x)) \dx = \frac12 \dd (y^2+ X^2) + y \dP(x(X)).
\end{equation}
We expand
\[
\dP(x(X)) = \left(\sum_{i=1}^\infty a_i X^i\right) \dX
\]
and obtain the equivalent foliation
\begin{equation}\label{lie3}
\frac12 \dd(y^2+X^2) + y \left(\sum_{i=1}^\infty a_i X^i\right) \dX = 0.
\end{equation}
By analogy to the Cherkas Lemma we shall prove
\begin{lemm}\label{l2}
The foliation~\eqref{lie3} has a Morse critical point at the origin if and only if $a_{2j}= 0$, $\forall j\geq 1$.
\end{lemm}
\begin{proof}
After rescaling $(X,y) \mapsto \varepsilon (X,y)$ the foliation takes the form
\begin{equation}
\mathcal F _\varepsilon : \frac12 \dd(y^2+X^2) + y \left(\sum_{i=1}^\infty \varepsilon ^{i}a_i X^i\right) \dX = 0
\end{equation}
and it suffices to prove that for sufficiently small $\varepsilon$ it has a Morse critical point. Note first that the truncated foliation $\mathcal F _\varepsilon^t$
\begin{equation}
\mathcal F _\varepsilon^t : \frac12 \dd(y^2+X^2) + y \left(\sum_{j=0}^{\infty} \varepsilon ^{2j+1}a_{2j+1} X^{2j+1}\right) \dX = 0
\end{equation}
is a pullback of
\begin{equation}\label{eq44}
\frac12 \dd(y^2+\xi) + \frac12 y \left(\sum_{j=0}^{\infty} \varepsilon ^{2j+1}a_{2j+1} \xi^j\right) \dd\xi = 0
\end{equation}
under the map $\pi : (X,y) \mapsto (\xi, y)$, $\xi = X^2$. The foliation~\eqref{eq44} is regular at the origin and has a first integral
\[
\frac12(y^2+\xi) + O(\varepsilon)
\]
where $O(\varepsilon)$ is analytic in $\varepsilon, \xi, y$, and vanishes for $\varepsilon = 0$. Thus $\mathcal F _\varepsilon^t$ has a first integral
\[
H_\varepsilon(x,y) = \frac12 (y^2+X^2) + O(\varepsilon)
\]
where $O(\varepsilon)$ is analytic in $\varepsilon, X^2, y$, and vanishes for $\varepsilon = 0$. This also shows that the origin is a Morse critical point of the truncated foliation $\mathcal F _\varepsilon^t $.
As $H_\varepsilon$ is a first integral of $\mathcal F _\varepsilon^t$, for every fixed $\varepsilon$ we have
\[
(1+ O(\varepsilon)) \dH_\varepsilon(x,y) = \frac12 \dd(y^2+X^2) + y \left(\sum_{j=0}^{\infty} \varepsilon ^{2j+1}a_{2j+1} X^{2j+1}\right) \dX.
\]
Suppose now that for some $j \geq 1$, $a_{2j} \neq 0$ and let $j=k$ be the smallest integer with this property. We have
\begin{equation}
\mathcal F _\varepsilon : (1+ O(\varepsilon)) \dH_\varepsilon(x,y) + \varepsilon^{2k} ya_{2k}X^{2k}\dX + O(\varepsilon^{2k+1})\dX = 0
\end{equation}
where by abuse of notations $ O(\varepsilon^{2k+1})$ denotes an analytic function in $X,y,\varepsilon$ which is divisible by $\varepsilon^{2k+1}$. The origin is a Morse critical point if and only if the holonomy maps of the two separatrices of $\mathcal F _\varepsilon$ at the origin are the identity maps. The holonomy map will be evaluated by the usual Poincaré--Pontryagin--Melnikov formula. The separatrices are tangent to the lines $y \pm i X = 0$. We take a cross-section to one of the separatrices, parameterised by the restriction of $H_\varepsilon(x,y)$ on it. Let
\[
\gamma_\varepsilon(h) \subset \{(x,y) : H_\varepsilon(x,y) = h \}
\]
be a continuous family of closed loops vanishing at the origin as $h \to 0$. The holonomy map of $\mathcal F _\varepsilon$, corresponding to this closed loop is
\begin{multline*}
h \mapsto h + \frac{\varepsilon^{2k}}{1+O(\varepsilon)} \left(\int_{\gamma_\varepsilon(h)} ya_{2k}X^{2k} \dX + O(\varepsilon) \dX\right) \\
\begin{aligned}
&= h+ \varepsilon^{2k} \int_{\gamma_\varepsilon(h)} ya_{2k}X^{2k} \dX + O(\varepsilon^{2k+1}) \dX \\
& = h + \varepsilon^{2k} a_{2k} \int_{\gamma_0(h)} y X^{2k} \dX + O(\varepsilon^{2k+1}) \dX
\end{aligned}
\end{multline*}
where
\[
\gamma_0(h) \subset \big\{(x,y) : H_0(x,y) = h \big\} = \left\{(x,y) : \frac12 (y^2+X^2) = h \right\}.
\]
By homogeneity of the polynomials
\[
\int_{\gamma_0(h)} y X^{2k} \dX = h^{k+1} \int_{\gamma_0(1)} y X^{2k} \dX.
\]
As the homology of the algebraic curve $\{(y,X)\in \Cbb^2: y^2+X^2 = 2h \}$ has one generator we can suppose that this generator is just the real circle $\gamma_0(1)= \{ (y,X)\in \R^2 : y^2+X^2 = 2 \}$ and in this case
\[
\int_{\gamma_0(1)} y X^{2k} \dX = \iint_{y^2+X^2 \leq 2 }X^{2k} \dX \dy \neq 0.
\]
We conclude that if the holonomy map is the identity map, then $a_{2k} = 0$ which is the desired contradiction. Lemma~\ref{l2} is proved.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{th2}] Assuming that the Liénard equation has a Morse critical point, and hence $Q(x)$ has a Morse critical point at the origin, denote by $x_1(h), x_2(h)$ the two roots of the polynomial $Q(x)-h$ which vanish at $0$ as $h$ tends to $0$. We have obviously that $X(x_1(h)) = - X(x_2(h))$. By Lemma~\ref{l2} the analytic function $P(x(X))$ is even in $X$, and hence $P(x_1(h))\equiv P(x_2(h))$. Following an idea of Christopher (already used at the end of Section~\ref{section3}), consider now the subfield $\mathcal C \subset \Cbb(x)$ formed by all rational functions $R=R(x)\in\Cbb(x)$ satisfying the identity
\[
R(x_1(h))\equiv R(x_2(h)).
\]
According to the Lüroth theorem, every subfield of $\Cbb(x)$ strictly containing $\Cbb$ is of the form $\Cbb(W)$ for some rational function $W=W(x)$. Thus we have $\mathcal C = \Cbb(W)$ where $P,Q \in \mathcal C$. Therefore there exist rational functions $\tilde P, \tilde Q$ such that
\[
P= \tilde P \circ W,\quad Q = \tilde Q \circ W.
\]
Using the same argument as in the proof of Theorem~\ref{ccth} we may suppose that $ \tilde P, W, \tilde Q$ are polynomials, and hence $P, Q$ satisfy (PCC) which completes the proof of Theorem~\ref{th2}.
\end{proof}
%%\pagebreak
\subsection{Abel equations with Darboux type first integral}\label{section43}
The polynomial Liénard equation
\begin{equation}
\dot{x} = y, \quad \dot{y} = -q(x) - y p(x)
\end{equation}
with associated foliation $y \dy + (q(x) + y p(x)) \dx = 0$, after the substitution $y\to 1/y$, becomes the following Abel equation
\begin{equation}\label{abel}
\frac{\dy}{\dx} = y^2 p(x) + y^3 q(x).
\end{equation}
Equivalently, we consider the foliation
\begin{equation}\label{abel243}
\dy = \left(y^2 p(x) + y^3 q(x)\right) \dx.
\end{equation}
The classification of Morse critical points of the Liénard equation~\eqref{lie1} obtained in Section~\ref{section4b} suggests that a similar claim would hold true for the scalar Abel equation~\eqref{abel243}. This is the content of the following
%%introduction theo env--
\begin{enonce*}[plain]{Composition Conjecture~\cite[p.~444]{bry10}}
The Abel equation~\eqref{abel243} has a center at the solution $y=0$ along some fixed interval $[a,b]$ if and only if the following Polynomial Composition Condition~\eqref{eq3PCC} holds true
\begin{equation}\label{eq3PCC}
P= \tilde P\circ W, \;Q= \tilde Q \circ W, \; W(a)=W(b). \tag{PCC}
\end{equation}
%%\vspace{1cm}\\
\end{enonce*}
Note that the Cherkas--Christopher theorem is for non-degenerate centers. The Composition Conjecture missed the possibility for the Abel or Liénard equations to have a Darboux type first integral, with resonant saddle point and characteristic ratio $p:-q$ (instead of a non-degenerate center with $1:-1$ ratio). Incidentally, Liénard equations with a Darboux type first integral will produce counter-examples to the Composition Conjecture, which is the subject of the present section. We explain in this context the recent counter-example of Giné, Grau and Santallusia~\cite{ggs18}.
The method of constructing such systems is based on the example of the\linebreak co-dimension four center set $\mathcal Q_4$ for quadratic system, as explained in Section~\ref{section42}.
Let
\begin{align*}
P_2 &= a_0(x) + a_1(x)y + a_2(x) y^2\\
Q_2 & = b_0(x)+ b_1(x)y + b_2(x) y^2
\end{align*}
where $a_i, b_j$ are polynomials, such that $P_2^p=Q_2^q + O(y^3)$, where $p,q$ are positive relatively prime integers.
This implies that the corresponding one-form
\[
p Q_2 \dP_2 - q P_2 \dQ_2
\]
is divisible by $y^2$, and then the associated reduced foliation (after division by $y^2$)
is of degree two in $y$, and moreover $\{y=0\}$ is a leaf. Therefore the foliation is defined~as
\begin{equation}\label{eq1}
(r_1y+r_2) \dy = y(r_3 y + r_4) \dx,\quad r_i \in \mathbb C[x]
\end{equation}
where
\begin{align*}
r_1 &= 2(p-q)a_2b_2\\
r_2 & = (p-2q)a_1b_2 -(q-2p)b_1 a_2 \\
r_3 &= pa_2'b_2-qb_2' a_2\\
r_4 & = p a_1'b_2 - qb_1' a_2.
\end{align*}
Note that if $a_2 = \const\neq 0$, $b_2 = \const\neq 0$ the foliation takes the Liénard form
\begin{equation}\label{eq2}
(r_1y+r_2) \dy = y r_4 \dx,\quad r_1=\const.
\end{equation}
Of course, it is not clear, whether such polynomials exist. To verify this we have to solve the equation
\[
\left(a_0(x) + a_1(x)y + a_2(x) y^2\right)^p = \left(b_0(x)+ b_1(x)y + b_2(x) y^2\right)^q \mod y^3
\]
assuming that $a_i(x), b_j(x)$ are polynomials, and $a_2, b_2$ are constants. A first condition is given by
\[
a_0^p=b_0^q
\]
which implies
\[
\left(1 + \frac{a_1(x)}{a_0(x)} y + \frac{a_2(x)}{a_0(x)} y^2\right)^p = \left(1+ \frac{b_1(x)}{b_0(x)}y + \frac{b_2(x) }{b_0(x)}y^2\right)^q \mod y^3
\]
or equivalently
\begin{align*}
p \frac{a_1(x)}{a_0(x)} &= q \frac{b_1(x)}{b_0(x)} \\
p \frac{a_2(x)}{a_0(x)} + \frac{p(p-1)}{2} \left(\frac{a_1(x)}{a_0(x)}\right)^2 &= q \frac{b_2(x)}{b_0(x)} + \frac{q(q-1)}{2}\left(\frac{b_1(x)}{b_0(x)}\right)^2.
\end{align*}
Thus $a_i, b_j$ are polynomials which satisfy the following redundant system of equations
\begin{align*}
a_0(x)^p & =b_0(x)^q \\
p \frac{a_1(x)}{a_0(x)} &= q \frac{b_1(x)}{b_0(x)} \\
p\frac{a_2(x)}{a_0(x)} - \frac{p}{2} \left(\frac{a_1(x)}{a_0(x)}\right)^2 &= q \frac{b_2(x)}{b_0(x)} - \frac{q}{2}\left(\frac{b_1(x)}{b_0(x)}\right)^2.
\end{align*}
It follows that for some polynomial $R$,
\[
a_0(x)=R(x)^q, \; b_0 = R(x)^p
\]
and moreover
\[
p a_2R(x)^{-q} - q b_2R(x)^{-p}
\]
is a square of a rational function, where we recall that $a_2=\const, b_2 = \const$. It is easy to check that this is only possible if, say $p