%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                                                                 %%
%% Please do not use \input{...} to include other tex files.       %%
%% Submit your LaTeX manuscript as one .tex document.              %%
%%                                                                 %%
%% All additional figures and files should be attached             %%
%% separately and not embedded in the \TeX\ document itself.       %%
%%                                                                 %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%\documentclass[referee,sn-basic]{sn-jnl}% referee option is meant for double line spacing

%%=======================================================%%
%% to print line numbers in the margin use lineno option %%
%%=======================================================%%

%%\documentclass[lineno,sn-basic]{sn-jnl}% Basic Springer Nature Reference Style/Chemistry Reference Style

%%======================================================%%
%% to compile with pdflatex/xelatex use pdflatex option %%
%%======================================================%%

%%\documentclass[pdflatex,sn-basic]{sn-jnl}% Basic Springer Nature Reference Style/Chemistry Reference Style

%\documentclass[sn-basic]{sn-jnl}% Basic Springer Nature Reference Style/Chemistry Reference Style
\documentclass[sn-mathphys]{sn-jnl}% Math and Physical Sciences Reference Style
%%\documentclass[sn-aps]{sn-jnl}% American Physical Society (APS) Reference Style
%%\documentclass[sn-vancouver]{sn-jnl}% Vancouver Reference Style
%%\documentclass[sn-apa]{sn-jnl}% APA Reference Style
%%\documentclass[sn-chicago]{sn-jnl}% Chicago-based Humanities Reference Style
%%\documentclass[sn-standardnature]{sn-jnl}% Standard Nature Portfolio Reference Style
%%\documentclass[default]{sn-jnl}% Default
%%\documentclass[default,iicol]{sn-jnl}% Default with double column layout

%%%% Standard Packages


\usepackage[utf8]{inputenc} % allow utf-8 input
\usepackage[T1]{fontenc}    % use 8-bit T1 fonts
\usepackage{hyperref}       % hyperlinks
\usepackage{url}            % simple URL typesetting
\usepackage{booktabs}       % professional-quality tables
\usepackage{amsfonts,amssymb,amsthm}       % blackboard math symbols
\usepackage{nicefrac}       % compact symbols for 1/2, etc.
\usepackage{microtype}      % microtypography
\usepackage{lipsum}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{graphicx} % figure inclusion
\usepackage{listings}
\usepackage{tikz}
\usepackage{pgfplots}
\usepackage{pgfplotstable}
\usetikzlibrary{arrows, automata}
\usetikzlibrary{arrows,shapes}
\pgfplotsset{compat=1.17} 
\usepackage{qcircuit}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{braket}
\usepackage{float}
\usepackage[l3]{csvsimple}
\usepackage{siunitx}
\usepackage{booktabs}
\usepackage{longtable}
%%%% 

%%%%%=============================================================================%%%%
%%%%  Remarks: This template is provided to aid authors with the preparation
%%%%  of original research articles intended for submission to journals published 
%%%%  by Springer Nature. The guidance has been prepared in partnership with 
%%%%  production teams to conform to Springer Nature technical requirements. 
%%%%  Editorial and presentation requirements differ among journal portfolios and 
%%%%  research disciplines. You may find sections in this template are irrelevant 
%%%%  to your work and are empowered to omit any such section if allowed by the 
%%%%  journal you intend to submit to. The submission guidelines and policies 
%%%%  of the journal take precedence. A detailed User Manual is available in the 
%%%%  template package for technical guidance.
%%%%%=============================================================================%%%%

\jyear{2021}%

%% as per the requirement new theorem styles can be included as shown below
\theoremstyle{thmstyleone}%
\newtheorem{theo}{Theorem}[section]
\newtheorem{lem}[theo]{Lemma}
\newtheorem{prop}[theo]{Proposition}
\newtheorem{propriete}[theo]{Property}
\newtheorem{cor}[theo]{Corollary}
\newtheorem{definition-theo}[theo]{Definition-Theorem}


%\newtheorem{theorem}{Theorem}%  meant for continuous numbers
%%\newtheorem{theorem}{Theorem}[section]% meant for sectionwise numbers
%% optional argument [theorem] produces theorem numbering sequence instead of independent numbers for Proposition
%\newtheorem{proposition}[theorem]{Proposition}% 
%%\newtheorem{proposition}{Proposition}% to get separate numbers for theorem and proposition etc.

\theoremstyle{thmstyletwo}%
\newtheorem{example}[theo]{Example}%
\newtheorem{remark}[theo]{Remark}%

\theoremstyle{thmstylethree}%
\newtheorem{definition}[theo]{Definition}

\raggedbottom
%%\unnumbered% uncomment this for unnumbered level heads

\begin{document}

\title[On New PageRank Computation Methods using Quantum Computing]{On New PageRank Computation Methods using Quantum Computing}

%%=============================================================%%
%% Prefix	-> \pfx{Dr}
%% GivenName	-> \fnm{Joergen W.}
%% Particle	-> \spfx{van der} -> surname prefix
%% FamilyName	-> \sur{Ploeg}
%% Suffix	-> \sfx{IV}
%% NatureName	-> \tanm{Poet Laureate} -> Title after name
%% Degrees	-> \dgr{MSc, PhD}
%% \author*[1,2]{\pfx{Dr} \fnm{Joergen W.} \spfx{van der} \sur{Ploeg} \sfx{IV} \tanm{Poet Laureate} 
%%                 \dgr{MSc, PhD}}\email{iauthor@gmail.com}
%%=============================================================%%

\author*[1]{\fnm{Théodore} \sur{Chapuis-Chkaiban}}\email{theodore.chapuis-chkaiban@student-cs.fr}

\author[2]{\fnm{Zeno} \sur{Toffano}}\email{zeno.toffano@centralesupelec.fr}
\equalcont{These authors contributed equally to this work.}

\author[3]{\fnm{Benoît} \sur{Valiron}}\email{benoit.valiron@centralesupelec.fr}
\equalcont{These authors contributed equally to this work.}

\affil*[1]{\orgdiv{Department of Computer Science},
  \orgname{CentraleSupélec}, \orgaddress{\street{3 Rue Joliot-Curie},
    \postcode{91190}, \city{Gif-Sur-Yvette}, \country{France}}}

\affil[2]{\orgname{Université Paris-Saclay}, CNRS, CentraleSupélec,
  \orgdiv{Laboratoire des signaux et systèmes},
  \orgaddress{\postcode{91190}, \city{Gif-sur-Yvette},
    \country{France}}}

\affil[3]{\orgname{Université Paris-Saclay}, CNRS, ENS Paris-Saclay,
  CentraleSupélec, \orgdiv{Laboratoire Méthodes Formelles},
  \orgaddress{\postcode{91190}, \city{Gif-sur-Yvette},
    \country{France}}}

\abstract{In this paper we propose several new quantum computation algorithms as an original contribution to the domains of PageRank algorithm theory, Spectral Graph Theory and Quantum Signal Processing.
We first propose an application of the HHL quantum algorithm for linear equation systems to PageRank. We then introduce one of the first quantum algorithms to perform a directed Graph Fourier Transform with low gate complexity. Finally, after proposing a generalized PageRank formulation based on ideas stemming from Spectral Graph Theory, we show how our quantum directed Graph Fourier Transform can be applied to compute this generalized version of PageRank.
}

\keywords{Quantum Computing, PageRank, Fourier Transform, Spectral
  Graph Theory}

\maketitle

\section{Introduction}

The Google PageRank algorithm was introduced by Sergey Brin and
Lawrence Page in their paper entitled "The anatomy of a
large-scale hypertextual Web search engine" \cite{brin_page_1998}. The PageRank algorithm produces a node ranking score by order of importance on a directed graph. The algorithm structured the Web: although it is now integrated into a larger pipeline, it still remains a major component of Google's search engine.

Numerous applications have been derived. For instance, in social networks the algorithm can be used to rank the most influential users or content. Use cases can be found in fields as varied as sports, literature, neuroscience, toxic waste management, debugging and road traffic prediction. See \cite{cornell_pagerank} for a broad discussion and \cite{gleich_2015}, where David F. Gleich extensively surveys the applications of the PageRank algorithm.

On the mathematical side, the algorithm relies on the Perron-Frobenius theorem and produces an approximation of the final PageRank vector.
In a sequential (non-parallel) setting, the PageRank of an $n$-node graph can be computed in $\mathcal{O}(n)$ operations (it amounts to performing a constant number of sparse matrix-vector multiplications). The computational complexity is logarithmic in the precision one wants to achieve on the final rankings.

Spectral Graph Theory (SGT) is a recent mathematical framework that adapts signal processing to graph theory. The Graph Fourier Transform, a major tool of SGT, provides interesting results on the properties of directed graphs \cite{shuman_narang_frossard_ortega_vandergheynst_2013}. This theory has numerous applications and consequences for the study of graphs, from Machine Learning and Computer Graphics to pure mathematics. See
\cite{chung_1997,
  ricaud_borgnat_tremblay_goncalves_vandergheynst_2019,
  shuman_narang_frossard_ortega_vandergheynst_2013, sevi2019} for
instance.

Three original proposals are presented in this paper: 

\begin{itemize}
    \item We propose an efficient way to compute the classical PageRank on quantum circuits using the HHL (Harrow, Hassidim, Lloyd) algorithm \cite{Harrow_2009}. To our knowledge this is the first quantum-circuit-based PageRank algorithm; it can be seen as the circuit equivalent of the Quantum Adiabatic Algorithm \cite{garnerone_zanardi_lidar_2012}, with some minor refinements in the treatment of dangling nodes and squared state amplitudes. Our algorithm may be seen as a solution to the open problem stated at the end of \cite{garnerone_zanardi_lidar_2012}: it achieves a better theoretical complexity for the final state preparation, and improves the number of samples needed to approximate the top ranking values. As far as we know, this theoretical logarithmic complexity had never been reached for the PageRank algorithm; the previous best result was obtained by \cite{garnerone_zanardi_lidar_2012}, with a $\mathcal{O}(\mathrm{polylog}(n))$ experimental complexity.
    
    
    \item We also propose a quantum computing framework to tackle several problems related to Spectral Graph Theory. We adapt the Quantum Singular Value Transformation algorithm \cite{gilyen_su_low_wiebe_2019} to compute inverse Graph Fourier Transforms. We show that, provided one has access to an efficient quantum data structure, one can either perform advanced graph signal filtering or produce complex signals based on the inverse Graph Fourier Transform, using a number of quantum gates that scales linearly with the degree of a polynomial approximation of the filter. We then present several use cases for our algorithm.
    
\item We finally generalize Google's classical PageRank algorithm and provide a new PageRank formalism that can be used in both classical and quantum Signal Processing. We then show that the Quantum Singular Value Transformation can compute this generalized version of PageRank using quantum circuits. The method again relies on Spectral Graph Theory and produces rankings by investigating the eigenspaces of the input graph transition matrix. We provide numerical results refining the ranking obtained by the classical PageRank algorithm while respecting the original structure of the input graph. We then show that our generalized PageRank method can be efficiently implemented on a quantum computer using our Quantum Signal Processing algorithm. We also draw a parallel between our method and G. Paparo's discrete-time quantum PageRank algorithm \cite{paparo_martin-delgado_2012}.
    
\end{itemize}


We have divided this article into 7 sections:

\begin{enumerate}
    \item Introduction.
    \item General notions on PageRank algorithm theory and state of the art of the quantum approaches to PageRank.
    \item HHL algorithm application to PageRank.
    \item Introduction to Spectral Graph Theory.
    \item Quantum algorithms for Graph Fourier Transform computation.
    \item Application of Spectral Graph Theory to PageRank.
    \item Conclusion.
\end{enumerate}

We added an appendix that details numerical examples of our generalized PageRank using various kernels.

\section{General notions on PageRank Theory and Quantum Computing applications}
\label{sec:previousResults}

\subsection{Mathematical statement of the problem.}

\subsubsection*{Inputs:} Given a directed finite weighted graph $\mathcal{G} = (V, E, W(E))$, where
$W(E) : V \times V \rightarrow \mathbb{R}^{+}$ is a map such that $\forall (i,j) \in V\times V, \left\{ \begin{array}{cc}
    W(i,j)>0 & \mbox{ if } (i,j) \in E  \\
    W(i,j)=0 & \mbox{ otherwise}
\end{array} \right.$.
We will only consider finite graphs in this paper, i.e. $|V|<\infty$.

Throughout this paper, to improve clarity, the simpler terms \textit{directed graphs} or \textit{graphs} are used in place of \textit{directed finite weighted graph}. The finiteness and weightedness properties are implicit unless stated otherwise. Similarly, the terms \textit{weighted adjacency matrix} and \textit{adjacency matrix} will be used interchangeably.

While the original PageRank algorithm accepts any type of graph as input, our results are derived by running the algorithm on scale-free networks. The degree distribution of a scale-free network follows an asymptotic power law, i.e. the proportion $P(k)$ of nodes having $k$ outgoing edges is:
\begin{equation*}
    P(k) \simeq k^{-\gamma}
\end{equation*}
where coefficient values $2 \leq \gamma \leq 3$ lead to the models that best capture human interaction \cite{choromanski_matuszak_mie}.

These scale-free networks are generated using the Barabási–Albert model. This model produces random scale-free networks using a preferential attachment mechanism, which aims to simulate natural and human-based systems such as the Internet or social networks \cite{barabasi_albert_1999, albert_barabasi_2002}.
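
For concreteness, the following minimal sketch shows how such a test instance could be generated and ranked; it assumes the Python \texttt{networkx} library, whose \texttt{scale\_free\_graph} generator produces directed scale-free networks via preferential attachment.

\begin{lstlisting}[language=Python]
import networkx as nx

# Directed scale-free network grown by preferential attachment.
g = nx.DiGraph(nx.scale_free_graph(1024, seed=42))

# Classical PageRank scores serve as the baseline ranking.
scores = nx.pagerank(g, alpha=0.85)
top10 = sorted(scores, key=scores.get, reverse=True)[:10]
print("Top-10 nodes:", top10)
\end{lstlisting}

Note that \texttt{networkx} uses the damping convention, i.e. \texttt{alpha=0.85} corresponds to the teleportation constant $\alpha = 0.15$ of Definition \ref{def:google_matrix} below.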

\subsubsection*{Result:} The output is given by the importance vector \emph{I}, $I:V \rightarrow \mathbb{R}$, which establishes a ranking of the nodes by order of ``importance''.

\subsection{The Google matrix.}

\subsubsection{Theoretical prerequisites}
\label{subsubsec:Theo_prepreq}
The PageRank algorithm is based on Random Walks, more specifically on Markov Chains; it uses the Perron-Frobenius theorem to compute the importance vector. In this first section, we state the definitions and theorems that form PageRank theory.

As stated before, the central mechanism behind the PageRank algorithm is the Markov Chain. Given a directed weighted graph $\mathcal{G} = (V,E, W(E))$, a Markov Chain can be informally defined as the successive positions of a random walker on the nodes \textit{V}, where the random walker jumps from node $i$ to node $j$ of $\mathcal{G}$ with the transition probability:

\begin{equation*}
    p_{ij} = \frac{W(i,j)}{\sum_{k\in V} W(i,k)} \in [0,1]
\end{equation*}
For a formal definition of a Markov Chain see \cite{grinstead_snell_2006}.

There is a direct correspondence between the set of states of a Markov Chain, with its associated transition probabilities, and a directed weighted graph.
The set of transition probabilities is likewise identified with the associated weighted adjacency matrix, which we will call the \textit{transition matrix} of the Markov Chain.
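
As an illustration, this normalization can be sketched in a few lines (a minimal \texttt{numpy} sketch; the helper name \texttt{transition\_matrix} is ours):

\begin{lstlisting}[language=Python]
import numpy as np

def transition_matrix(W):
    # p_ij = W(i,j) / sum_k W(i,k); all-zero rows (dangling
    # nodes) are left untouched here and patched later.
    W = np.asarray(W, dtype=float)
    row_sums = W.sum(axis=1, keepdims=True)
    return W / np.where(row_sums == 0.0, 1.0, row_sums)

W = np.array([[0.0, 2.0, 1.0],
              [1.0, 0.0, 0.0],
              [0.0, 0.0, 0.0]])   # node 2 is dangling
print(transition_matrix(W))
\end{lstlisting}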

Thus, to simplify notations, we will assume that all the directed weighted graphs $\mathcal{G}=(V, E, W(E))$ used in this paper are normalized, i.e. for every $i \in V$ that has at least one outgoing edge ($\exists j \in V$ such that $(i,j)\in E$), we have $\sum_{j \in V} W(i,j) = 1$. Hence, given a graph $\mathcal{G}$, we will use the term \textit{transition matrix} interchangeably with the \textit{weighted adjacency matrix} of $\mathcal{G}$.

We also say that a given matrix is \textit{left-stochastic} if the coefficients of each of its rows sum to one.

Let us now define the notion of \textit{dangling nodes}, which is fundamental to PageRank theory:

\begin{definition}[Dangling Nodes \cite{langville_meyer_2004}]\label{def:dangling}
Let $\mathcal{G}=(V,E)$ be a directed graph. $x \in V$ is called a dangling node if it has no outgoing edges.
\end{definition}

The \textit{transition matrix} of a Markov Chain on a graph that does not have any \textit{dangling nodes} (i.e. nodes that have no outgoing edges) is \textit{left-stochastic}. If the graph admits dangling nodes, then the transition matrix has a null row.

\begin{definition}[Regular Markov Chains \cite{grinstead_snell_2006}]
\label{def:Regular_Markov}
A Markov Chain is called \textbf{regular} if some power of its
transition matrix has only positive coefficients. If \textit{C} is a Markov Chain associated with the probability transition matrix \textit{P}, then \textit{C} is regular \textit{iff}: \begin{equation}\label{eq:regularity} \exists k \in
  \mathbb{N}, \mbox{ so that } \forall (i,j) \in [\![1, n]\!]^2, P^k_{i,j}>0
\end{equation}

By extension, we say that a matrix is \textbf{regular} if it satisfies
\ref{eq:regularity}.

Note that a \textbf{regular matrix is necessarily
  left-stochastic}, and hence is not associated with a graph that
admits dangling nodes.

\end{definition}

Hence, a Markov Chain whose state space is a graph $\mathcal{G}=(V,E)$ is regular \textit{iff} a random walker starting from any $v \in V$ can be located at any node of $\mathcal{G}$ after a finite number of steps.

Let's introduce an equivalent characterisation for regular Markov Chains that only holds true for finite state Markov Chains.

\begin{theo}[Regular Markov Chain Characterisation]
Let $C$ be a Markov Chain associated with the probability transition matrix $P$. $C$ is \textbf{regular} iff it has the following properties:
\begin{enumerate}
    \item Irreducibility: $\forall (i, j), \exists \ k \mbox{ so that } P^k(i,j)>0$
    \item Aperiodicity: $\forall i, \ \gcd\{k: P^k(i,i)>0\} = 1$
\end{enumerate}

\end{theo}

\label{ergodic_regular_markov}

Some authors (see \cite{sevi2019}) use the term \textit{ergodic
  Markov Chain} to designate a \textit{regular Markov Chain}, while
others \cite{grinstead_snell_2006} use the term \textit{ergodic Markov Chain} in relation to the \textit{irreducibility} property alone. To simplify the terminology, we only use the term \textit{regular Markov Chain} throughout this paper.

In the previous theorem, the \textit{irreducibility} property means that, starting from any node, a random walker can reach any other node in the graph after a finite number of steps.

A node $i$ has \textit{period} $k$ if, starting from $i$, a random walker can only return to $i$ in a number of steps that is a multiple of $k$. The \textit{period} of a graph is the greatest common divisor (gcd) of the periods of its nodes. A bipartite graph, for instance, is a 2-periodic graph. A graph is aperiodic if it is 1-periodic.


Now let's introduce the limit distribution of a Markov Chain, that will allow us to define the importance vector.

\begin{definition}[Limit distribution of a Regular Markov Chain \cite{grinstead_snell_2006}]
  Let $P$ be the transition matrix for a regular Markov Chain. Then,
  as $n \rightarrow \infty$, the powers $P^n$ approach a limiting
  matrix $W$ with all rows equal to the same vector $w$. The vector $w$ is a
  strictly positive probability vector (all its components are
  strictly positive and they sum to one).

Then :
\begin{enumerate}
    \item $wP = w$ and any row vector $v$ such that $vP = v$ is a constant multiple of $w$.
    \item $P \mathbb{1} = \mathbb{1}$, where $\mathbb{1}$ is the column vector where all the coefficients are equal to 1
\end{enumerate}

The vector $w$ is called the \textbf{probability limit distribution}
of the Markov Chain $C$ associated with the transition matrix $P$.

\end{definition}

The coefficients $P^k_{ij}$ of the successive powers of the transition matrix correspond to the probability that a walker starting from node $i$ reaches node $j$ after $k$ steps, and $w$ is the common row of the matrix $\lim_{k\rightarrow \infty} P^k$. The limit distribution coefficients $w_i$ can therefore be interpreted as the probability of reaching node $i$ of the state graph after letting a random walker surf on the state graph for an arbitrarily long time, starting from any node.

In this way, the definition of the limit probability distribution is consistent: a node with a high limit probability is able to attract a random walker starting from any node of the graph.
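
This convergence is easy to observe numerically; the following sketch (assuming \texttt{numpy}) raises a small regular transition matrix to a large power and checks that all rows agree with the left fixed point $w$:

\begin{lstlisting}[language=Python]
import numpy as np

# A small regular (row-stochastic) transition matrix.
P = np.array([[0.1, 0.9, 0.0],
              [0.4, 0.1, 0.5],
              [0.5, 0.5, 0.0]])

Pk = np.linalg.matrix_power(P, 50)
print(Pk)                        # all rows approach the same vector w
w = Pk[0]
print(np.allclose(w @ P, w))     # w is a left fixed point: wP = w
\end{lstlisting}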

The Perron-Frobenius Theorem is the exact transposition of the previous result to linear algebra. Before discussing the Perron-Frobenius Theorem, let us recall the definition of a left (resp. right) eigenvector, as it will be heavily used in this paper.

\begin{definition}[Left/Right Eigenvector]\label{theo:limit_distrib_markov_chain}
  A left (resp. right) eigenvector associated with an eigenvalue
  $\lambda$ of a matrix $M \in \mathcal{M}_n(\mathbb{C})$ is a vector
  $X \in \mathbb{C}^n \setminus \{0\}$ such that:
\begin{equation*}
    XM = \lambda X
\end{equation*}
(respectively $MX = \lambda X$).
\end{definition}

\begin{definition-theo}[Perron-Frobenius Theorem for Positive Matrices \cite{meyer_2000}]\label{def-theo:perron-frob}
  Let $M \in \mathcal{M}_n(\mathbb{C})$ be a regular matrix, i.e. a
  matrix such that
\begin{equation*}
    \exists k \in \mathbb{N}, \mbox{ so that } M^k_{ij} > 0, \ \forall (i,j) \in [\![1, n]\!]^2
\end{equation*}

\begin{enumerate}
\item Then 1 is an eigenvalue of $M$, and any other eigenvalue $\lambda$ of $M$ has absolute value strictly smaller than 1, \textit{i.e.}
  $\forall \lambda \in Sp(M) \setminus \{1\}, |\lambda| < 1$. The eigenvalue 1 is called the
  Perron root, the Perron-Frobenius eigenvalue or the leading eigenvalue.
\item Moreover, the Perron-Frobenius eigenvalue is simple, i.e. the
  eigenspace associated with 1 is one-dimensional.
\item There exists a left (resp. right) eigenvector
  $v = (v_1, \hdots, v_n)^T$ of $M$, associated with the Perron-Frobenius
  eigenvalue, such that $v_i > 0$ for all $i \in [\![1, n]\!]$.
\item There exists a unique left (resp. right) eigenvector such that:
    \begin{equation*}
        p M = p, \quad p>0, \quad \mbox{and} \quad \|p\|_1 = 1
    \end{equation*}
    This eigenvector is called the left (resp. right) Perron-vector of $M$.

\end{enumerate}

\end{definition-theo}

Hence, given a regular Markov Chain $C$ associated with a regular matrix
$M$, the probability limit distribution of $C$ is exactly the left Perron-vector of $M$.

Having laid the theoretical framework, we can now explain the motivations that have led to our PageRank model.

\subsubsection{Classical Google PageRank}

\paragraph{Patching the dangling nodes:} Let $\mathcal{G}=(V,E)$ be a directed graph with transition matrix $P$. If
the graph $\mathcal{G}$ features dangling nodes, one transforms the
matrix $P$ into $\Bar{P}$, where
\begin{equation*}\label{eq:dangling_nodes_free}
    \Bar{P_{i}} = \left\{
    \begin{array}{ll}
        P_{i} & \mbox{if $i$ is not a dangling node}  \\
        (\rho_1, \hdots, \rho_n) & \mbox{if $i$ is a dangling node}
    \end{array}
    \right.
\end{equation*} 
Here $\Bar{P_{i}}$ is the $i^{th}$ row of $\Bar{P}$ and
$(\rho_1, \hdots, \rho_n)$ is a stochastic row vector (i.e.
$\underset{i \in [1, n]}{\sum} \rho_i = 1$). We call the matrix
$\Bar{P}$ the dangling-node-free transition matrix of $C$.

One has to add virtual outgoing edges to prevent input graphs from having dangling nodes; otherwise it is impossible to escape their attraction: once a random walker is trapped in a dangling node, it cannot move to another node, and the limit probability distribution admits only one non-zero coefficient.

In \cite{boldi_posenato_santini_vigna} an extensive discussion of the subject is presented, defining three types of patches: the
\textit{Strongly Preferential PageRank}, which is chosen by default
when a patch method is not expressly specified \cite{gleich_2015} ($\rho$ is the uniform
probability distribution), the
\textit{Weakly Preferential PageRank} and the \textit{Sink Preferential
  PageRank}, which are less common \cite{gleich_2015}.
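
A minimal sketch of the default (strongly preferential) patch, assuming \texttt{numpy} (the helper name \texttt{patch\_dangling} is ours):

\begin{lstlisting}[language=Python]
import numpy as np

def patch_dangling(P, rho=None):
    # Replace every null row of P by the stochastic vector rho;
    # rho defaults to the uniform distribution, i.e. the
    # strongly preferential PageRank patch.
    P = np.array(P, dtype=float)
    n = P.shape[0]
    rho = np.full(n, 1.0 / n) if rho is None else np.asarray(rho)
    P[P.sum(axis=1) == 0.0] = rho
    return P
\end{lstlisting}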

\paragraph{Google matrix and importance vector:} In Lawrence Page and Sergey Brin's paper \cite{brin_page_1998}, the
Google matrix is defined as follows:

\begin{definition}[Google Matrix \cite{brin_page_1998}]
\label{def:google_matrix}
Let $P$ be a dangling-node-free transition matrix associated with a directed graph $\mathcal{G}$. The Google Matrix of $\mathcal{G}$ is:
\begin{equation*}
    G = (1-\alpha) P + \frac{\alpha}{n} \mathbb{1}
\end{equation*}
where $\mathbb{1}$ is the matrix that only contains ones and $\alpha \in [0,1]$ is a constant.

\end{definition}

The original intuition behind the Google matrix is
the following: let $\mathcal{G}=(V,E)$ be a directed graph
representing the relations between web pages, \textit{i.e.} where $x \in V$ represents a web page and
$e_{ij} \in E$ is the link from page $i$ pointing to page $j$. In order to quantify the ``importance'' of the nodes of the graph, we propagate a random walker on the Web and compute the probability that the walker reaches a given node after a long time: the higher the probability, the more relevant the node.

The Google matrix transforms the input graph by adding low-weighted edges between every pair of nodes, making the input graph strongly connected (and thus regular). Google uses the value $\alpha=0.15$ for its web search engine: this is a heuristic choice balancing fast convergence against pertinence of the results
\cite{langville_meyer_2004}. Indeed, the smaller the constant
$\alpha$, the better the structure of the original graph is
preserved, while the greater $\alpha$, the faster the PageRank
converges.

One can then finally define the Google importance vector using the Google matrix:
 
\begin{definition}[Google importance vector]
  Let $\mathcal{G}=(V,E)$ be the directed input graph. The Google
  importance vector is the Perron-vector of the Google Matrix of
  $\mathcal{G}$.

  In other words, the Google importance vector is defined as the unique unit left row eigenvector associated with the eigenvalue 1 of the Google Matrix, i.e. $I$ is the only vector such that:
  
  \begin{equation}
    IG = I
  \end{equation}
  
  Since the Google matrix is regular, it has only one eigenvalue equal
  to 1, and $I$ is well defined.
  
\end{definition}

Finally, the classical PageRank algorithm performs the following
steps:

\begin{enumerate}
\item Choose a starting vector $x^{(0)}$ (generally,
  $x^{(0)} = \mathbb{1}/n$ where $\mathbb{1}$ is the vector filled
  with ones).
\item While $\|x^{(k)T} - x^{(k-1)T}\| > \epsilon$ (i.e. while the
  algorithm has not converged), perform the following update:
  \begin{equation*}
    x^{(k)T} = x^{(k-1)T} G
  \end{equation*}
\item Output $x$.
\end{enumerate}
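
These steps can be summarized in a short \texttt{numpy} sketch, assuming a dangling-node-free row-stochastic matrix \texttt{P\_bar}; the rank-one term $\frac{\alpha}{n}\mathbb{1}$ is applied implicitly, so the dense Google matrix is never formed:

\begin{lstlisting}[language=Python]
import numpy as np

def classical_pagerank(P_bar, alpha=0.15, eps=1e-10):
    # Power iteration on G = (1 - alpha) P_bar + (alpha / n) * ones,
    # without materializing G: x G = (1-alpha) x P_bar + (alpha/n) sum(x).
    n = P_bar.shape[0]
    x = np.full(n, 1.0 / n)
    while True:
        x_new = (1 - alpha) * (x @ P_bar) + (alpha / n) * x.sum()
        if np.linalg.norm(x_new - x, 1) < eps:
            return x_new
        x = x_new
\end{lstlisting}

With \texttt{P\_bar} stored as a sparse matrix, each update costs $\mathcal{O}(nnz(P))$ operations, as discussed below.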

There are ways to reduce the computational cost of the update operation: since the input transition matrix $P$ is sparse and the Google matrix is a linear combination of $P$ and a constant matrix, the update operation scales as
$\mathcal{O}(nnz(P)) \simeq \mathcal{O} (n)$, where $nnz(P)$ is the
number of non-zero components of $P$. See \cite{langville_meyer_2004}
for more details on the explicit computation and optimisation of the
classical PageRank algorithm.


\subsection{Quantum PageRank}

Over the past years two main directions have been taken to adapt the PageRank algorithm to the Quantum domain. 

The first direction was taken in \cite{garnerone_zanardi_lidar_2012}, proposing polynomial computational improvements over the classical counterpart; this method, however, does not use Quantum Walks to improve the ranking performance.

A second one, \cite{paparo_2014,
  paparo_muller_comellas_martin-delgado_2013,
  sanchez-burillo_duch_gomez-gardenes_zueco_2012,
  loke_tang_rodriguez_small_wang_2016,
  paparo_martin-delgado_2012}, studied the influence of Quantum Random Walks on the PageRank rankings. This approach exploits Quantum Walk properties such as quantum state interference, which heavily modifies the walker's probability
distribution on the state graph \cite{kempe_2009} and reveals the graph structure more precisely \cite{paparo_2014,
  paparo_muller_comellas_martin-delgado_2013,
  loke_tang_rodriguez_small_wang_2016}. These algorithms were essentially designed to produce classical simulations of discrete quantum walks, and hence did not provide significant computational improvements over the classical random walk based solutions.

\section{Quantum PageRank using the HHL algorithm.}

Here we present our first application: adapting the quantum adiabatic algorithm for search engines, introduced by Garnerone, Zanardi and Lidar in 2012 \cite{garnerone_zanardi_lidar_2012}, to a quantum circuit computation of PageRank. To our knowledge this application has never been proposed so far. The linear system formulation of PageRank presented here possesses several interesting properties that make it particularly suitable to be solved by the HHL algorithm \cite{Harrow_2009}. Our contribution is a quantum circuit algorithm that prepares a quantum state corresponding to the Google importance vector, which has multiple interesting applications (some of them inspired by \cite{garnerone_zanardi_lidar_2012}). In addition, providing a circuit-based quantum PageRank algorithm is interesting as it can easily be used as a subroutine for more complex algorithms.

\subsection{Problem statement}
\subsubsection*{Inputs:}
The personalized Google matrix is used as input:
$G = \alpha P + (1-\alpha) e v^T$ where $P$ is the stochastic
transition matrix of a dangling-node-free graph, $\alpha$ is a
constant and $v \in (\mathbb{R}^{+})^N$, with $v^T e = 1$, is the
personalization vector.

For a correct performance of our algorithm, $P$ must be an \textit{s-sparse} matrix. To satisfy this requirement, the dangling node removal procedure must not produce dense rows in the dangling-node-free transition matrix. For this purpose, we follow \textit{Weakly Preferential PageRank} methods when handling dangling nodes:
\begin{itemize}
\item \textit{Sink Preferential PageRank}: we apply the
  transformation $P \rightarrow P + diag(c_i)$, where
  $c_i = 1 \mbox{ \textit{iff} $i$ is a dangling node}$. This amounts to letting the walker wait on the dangling node with probability $\alpha$, and letting the walker go to another node following the personalization vector
  $v$ with probability $1 - \alpha$.
\item \textit{Node reversal Preferential PageRank}: we apply the
  transformation
  $P \rightarrow P + diag(c_i) (\beta P^{T_{stoch}} + (1-\beta)Id)$,
  with $\beta$ a constant. This amounts to letting the walker jump back to a node that points towards the dangling node with probability $\alpha\beta$,
  stay on the dangling node with probability $\alpha(1-\beta)$, or
  leave the node following the teleportation vector with probability
  $(1-\alpha)$.
\end{itemize}

These dangling node removal methods preserve the sparsity of the input matrix. The first method increases the score of the dangling nodes compared to the \textit{Strongly Preferential PageRank} method; the second one increases the score of the nodes linked to the dangling nodes. It must be emphasized that these methods do not seem to modify the global PageRank score significantly, since in most applications dangling nodes represent a very small proportion of the nodes of the graph (often bogus nodes
\cite{brin_page_1998}).

\subsubsection*{Resources:}
We assume that we have access to an efficient method to prepare the quantum state
$\ket{v} = \sum_i \frac{v_i}{\|v\|_2} \ket{i}$ and the oracle whose matrix
representation is $(I - \alpha P)^T$.

\subsubsection*{Outputs:}
Our algorithm outputs a final quantum state that represents the limit probability distribution of the Google matrix
$\ket{\pi} = \sum_i \frac{\pi_i}{\|\pi\|_2} \ket{i}$.

\subsection{Introduction to HHL}

The quantum algorithm for linear systems of equations was introduced by A. Harrow, A. Hassidim and S. Lloyd in 2009 in their paper
\cite{Harrow_2009}.

Given a linear system $Ax = b$, where $A$ is an
$n \times n$ $s$-sparse Hermitian matrix, the HHL algorithm prepares the solution $x$ in a quantum state $\ket{x}$ using
$\mathcal{O}(\frac{\log(n) s^2 \kappa^2}{\epsilon})$ quantum gates, where $\kappa$ is the condition number of $A$.

For the case where the matrix $A$ is not Hermitian (the case for PageRank), one can solve the extended linear system
$\begin{pmatrix} 0 & A \\ A^T & 0 \end{pmatrix} \hat{x} = \hat{b}$,
where $\hat{b} = \begin{pmatrix} b \\ 0 \end{pmatrix}$ and
$\hat{x} = \begin{pmatrix} 0 \\ x \end{pmatrix}$. The computational
complexity remains the same, as the eigenvalues of
$C = \begin{pmatrix} 0 & A \\ A^T & 0 \end{pmatrix}$ are $\pm \sigma_i$, where the $\sigma_i$ are the singular values of
$A$: each singular value of $A$ appears twice (once with each sign) in the eigendecomposition of $C$.
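
This spectral property of the Hermitian dilation is easy to check numerically; the following sketch (assuming \texttt{numpy}) compares the eigenvalues of $C$ with the signed singular values of a random non-symmetric $A$:

\begin{lstlisting}[language=Python]
import numpy as np

rng = np.random.default_rng(0)
A = rng.random((4, 4))                     # non-symmetric real matrix
Z = np.zeros((4, 4))
C = np.block([[Z, A], [A.T, Z]])           # Hermitian dilation of A

eigs = np.sort(np.linalg.eigvalsh(C))
svals = np.linalg.svd(A, compute_uv=False)
print(eigs)                                # {+sigma_i} union {-sigma_i}
print(np.sort(np.concatenate([svals, -svals])))
\end{lstlisting}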

This algorithm has important applications in several fields of Physics and Computer Science such as Electromagnetism
\cite{Clader_2013} or Quantum Machine Learning \cite{Zhao_2019}.

\subsubsection*{Application to the PageRank algorithm}
The PageRank formalism is surprisingly well adapted to the HHL algorithm.

Let $\pi$ be the limit probability distribution vector, one has the following equalities:

\begin{align}
    &\pi G = \pi \\
    &\Leftrightarrow \pi (\alpha P + (1-\alpha) ev^T) = \pi \\
    &\Leftrightarrow \pi (Id - \alpha P) = (1 - \alpha) \pi e v^T \\
    &\Leftrightarrow \left\{ \begin{array}{c}
        (Id - \alpha P)^T \pi^T = (1 - \alpha) v \\
           \pi e = 1
    \end{array} \label{eq:linear_system_formulation} \right.
\end{align}

where the last step uses the fact that $\pi e = 1$. Equation
\ref{eq:linear_system_formulation} is the linear system formulation of PageRank. This formulation is equivalent to the popular eigensystem formulation used in classical approaches \cite{langville_meyer_2004, gleich_2015}.
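
The following \texttt{numpy} sketch checks equation \ref{eq:linear_system_formulation} on a random dense instance: solving the linear system directly yields a probability vector fixed by the personalized Google matrix (here $\alpha$ multiplies $P$, following the convention of this section):

\begin{lstlisting}[language=Python]
import numpy as np

n, alpha = 64, 0.85
rng = np.random.default_rng(1)
P = rng.random((n, n))
P /= P.sum(axis=1, keepdims=True)      # row-stochastic, no dangling rows
v = np.full(n, 1.0 / n)                # uniform personalization vector

# (Id - alpha P)^T pi^T = (1 - alpha) v
pi = np.linalg.solve((np.eye(n) - alpha * P).T, (1 - alpha) * v)
G = alpha * P + (1 - alpha) * np.outer(np.ones(n), v)
print(np.isclose(pi.sum(), 1.0))       # pi is a probability vector
print(np.allclose(pi @ G, pi))         # pi is the limit distribution
\end{lstlisting}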

The linear system \ref{eq:linear_system_formulation} presents the following property relevant for the HHL
algorithm :

\begin{propriete}[Condition number of the linear system formulation of PageRank \cite{langville_meyer_2004}]
    The condition number $\kappa_\infty(PageRank)$ of $(Id - \alpha P)$ is entirely determined by $\alpha$:
    \begin{equation}
        \kappa_\infty(PageRank) = \frac{1+\alpha}{1-\alpha}
    \end{equation}
    
\end{propriete}

Note that each row of $(I - \alpha P)$ sums to
$(1-\alpha)$, so $(1 - \alpha)$ is an eigenvalue of $(I - \alpha P)$
(a consequence of the Gershgorin theorem applied to stochastic matrices). As the eigenvalues of
$C = \begin{pmatrix} 0 & A \\ A^T & 0 \end{pmatrix}$ are, up to sign, the
singular values of $A$, the minimum singular value of $C$ is
$\sigma_{min} = \frac{(1-\alpha)^2}{(1+\alpha)}$.

To apply the HHL algorithm, one needs to rescale the matrix $(I-\alpha P)$ by the
$(\frac{1}{1-\alpha})$ factor so that every singular value of $C$ lies between $\frac{1}{\kappa}$ and $1$.

We can then state the theoretical complexity for our PageRank vector computation :

\begin{prop}[Gate complexity of the Circuit Based Quantum PageRank]
  Provided an efficient oracle access to the matrix
  $\frac{1}{1-\alpha}(Id - \alpha P)^T$, of size $N\times N$ and
  sparsity $s$, and to the personalization vector $v$, the circuit based quantum PageRank algorithm can prepare the state $\ket{\pi}$ with precision $\epsilon$ in runtime
  $\mathcal{O}(\frac{\log(N) s^2}{\epsilon})$.
\end{prop}

\subsection{Algorithm Major Steps and Applications}
\begin{figure}
    \centering
    $\begin{array}{c}
    \Qcircuit {
    \lstick{D : \ket{0}} & \qw & \qw & \qw & \multigate{2}{\mbox{Eigenvalue Inversion}} & \qw & \meter  \\
    \lstick{C : \ket{0}_{n_l}} & {/} \qw &\qw &  \multigate{2}{QPE} & \ghost{\mbox{Eigenvalue Inversion}} & \multigate{2}{QPE^\dagger} & \qw  \\
    \lstick{B : \ket{0}_{n_a}} & {/} \qw & \multigate{1}{Load \ket{v}}  & \ghost{QPE} & \ghost{\mbox{Eigenvalue Inversion}} & \ghost{QPE^\dagger} & \qw\\
    \lstick{A : \ket{0}_{n_b}} & {/} \qw & \ghost{Load \ket{v}} & \ghost{QPE}  & \qw &  \ghost{QPE^\dagger} & \rstick{\ket{\pi}} \qw \gategroup{1}{4}{4}{6}{.7em}{--} \\
    & & & & \dstick{\mbox{\textit{Repeat until success}}} \cwx & &}       
    \end{array}$
    
    \bigskip
    \caption{The Quantum Circuit to prepare the Google matrix Probability Limit Distribution}
    \label{fig:quantum_circuit_algo_2}
\end{figure}

The quantum circuit used in this algorithm is detailed in Figure
\ref{fig:quantum_circuit_algo_2}. An exhaustive description of the steps of the HHL algorithm can be found in \cite{Harrow_2009, Pan_2014} and online \cite{qiskit_hhl}.

The use cases developed in \cite{garnerone_zanardi_lidar_2012} for the Adiabatic Quantum Computing method (ranking the top nodes and comparing successive PageRanks) can also be analysed with our quantum circuit. Moreover, compared to
the method in \cite{garnerone_zanardi_lidar_2012}, which scales polylogarithmically in $N$ and in
$\frac{1}{\epsilon^2}$, our method has a time complexity of
$\mathcal{O}(\frac{\log(N)}{\epsilon})$ for an $\mathcal{O}(1)$-sparse input graph
(which is often the case for web graphs), provided efficient quantum state generation and quantum data access.

Using our method followed by quantum (iterative) amplitude estimation \cite{grinko_gacon_zoufal_woerner_2021, brassard_hoyer_mosca_tapp_2002}, one can determine the amplitude of a given component of $\ket{\pi}$ with precision $\epsilon$, which is quadratically faster than the classical MCMC approach.

\section{A short introduction to the directed graph Fourier Transform.}
\label{sec:SGTIntro}

In the next sections, we study potential applications of quantum computing related to Spectral Graph Theory, using the Quantum Singular Value Transformation to compute graph signal filters through the Graph Fourier Transform. In this section, we introduce the topics of Spectral Graph Theory for directed graphs that will be used extensively in the following sections of the paper.

\subsection{Transposition of classical Fourier Analysis to Spectral graph Theory.} 

We will introduce Spectral Graph Theory (SGT) for directed graphs using Markov Chains, in a similar way as recently proposed in
\cite{sevi2019}. Previous approaches
\cite{sandryhaila_moura_2014, sardellitti_barbarossa_lorenzo_2017}
yielded interesting yet less exhaustive extensions, as
they either introduce non-closed-form expressions for the \textit{Graph
  Fourier Transform} \cite{sardellitti_barbarossa_lorenzo_2017} or
do not align well with the established approaches on undirected graphs
\cite{sandryhaila_moura_2014}.

Moreover, the approach of \cite{sevi2019} is interesting because it
incorporates the Random Walk framework already used in subsection
\ref{subsubsec:Theo_prepreq}, and nicely defines the Graph Fourier Transform within this context.

The important notions for SGT of \textit{Lazy Markov Chains, Time-Reversed Markov Chain, Reversible Markov chain, additive reversibilisation and random walk laplacian} are all explained in \cite{sevi2019}.

\subsubsection{A new approach of Graph Fourier Transform}
\textit{We suppose given a Markov
  chain $C$ on a graph $G=(V,E)$ with a probability transition matrix
  $P$. We define $\hat{P}_\alpha = \alpha P + (1-\alpha) P^*, \alpha \in [0,1]$, where $P^*$ is the transition matrix associated with the time-reversed version of $C$. We also write $\hat{P} = \hat{P}_{1/2}$.}

Let us define a directed graph Fourier Transform slightly different from the one introduced in \cite{sevi2019}, where the authors assume that $P$ is diagonalizable. We provide a more flexible extension of this definition to non-diagonalizable transition matrices by using the SVD.

\begin{definition}[Directed Graph Fourier Transform]\label{def:directed_graph_fourier_transform}
  Let $P$ be the transition probability matrix of a given regular
  Markov Chain $C$ on a graph $G=(V,E)$. We assume that $P$ is aperiodic (otherwise one can use $\bar{P} = \nicefrac{Id + P}{2}$).
  
  Any $\hat{P}_\alpha, \alpha \in [0,1]$ admits a Singular Value Decomposition. The SVD basis of $\hat{P}_\alpha$ is called the $\Xi_\alpha$ Fourier Basis of $C$. Note that the SVD and the eigendecomposition coincide for $\alpha = \nicefrac{1}{2}$.
  
  Let $\phi$ be a signal on $G$, and $\Xi_\alpha$ a Fourier Basis. The
  $\alpha$-Fourier Transform of $\phi$, denoted $\hat{\phi}_\alpha$, is
  defined by:
  \begin{equation}
    \hat{\phi}_\alpha = \Xi_\alpha^{-1} \phi
  \end{equation}
  
  and the inverse Fourier Transform is:
  \begin{equation}
    \phi = \Xi_\alpha \hat{\phi}_\alpha
  \end{equation}
  
\end{definition}
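
A minimal classical sketch of this definition for $\alpha = 1/2$, assuming \texttt{numpy} and the usual expression of the time reversal, $P^*_{ij} = \pi_j P_{ji} / \pi_i$ (see \cite{sevi2019}):

\begin{lstlisting}[language=Python]
import numpy as np

# Small regular chain and its stationary distribution pi.
P = np.array([[0.1, 0.9, 0.0],
              [0.4, 0.1, 0.5],
              [0.5, 0.5, 0.0]])
w, V = np.linalg.eig(P.T)
pi = np.real(V[:, np.argmax(np.real(w))])
pi /= pi.sum()

# Time reversal and the alpha = 1/2 matrix P_hat = (P + P*) / 2.
Pstar = (pi[None, :] * P.T) / pi[:, None]
lam, Xi = np.linalg.eig(0.5 * (P + Pstar))   # Fourier basis Xi

phi = np.array([1.0, 0.0, 0.0])        # a signal on the graph
phi_hat = np.linalg.solve(Xi, phi)     # forward GFT: Xi^{-1} phi
print(np.allclose(Xi @ phi_hat, phi))  # inverse GFT recovers phi
\end{lstlisting}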

The $\alpha = 1/2$ case is particularly
interesting. Let us denote by $\pi$ the limit distribution of $C$ and define the following scalar product $\langle \cdot, \cdot \rangle_{\pi}$ on $l^2(V, \pi)$: $$(f, g) \rightarrow \sum_{v\in V} f^*(v)g(v)\pi(v)$$

Since $\hat{P} = \frac{P + P^*}{2}$ is self-adjoint for $\langle \cdot, \cdot \rangle_{\pi}$, it is a diagonalizable operator and admits a $\langle \cdot, \cdot \rangle_{\pi}$-orthonormal basis of eigenvectors $\Xi_{1/2}$, which coincides with the SVD basis of $\hat{P}_{1/2}$.

This basis is also an eigenvector basis of $\mathcal{L}^{rw}$ - the random walk laplacian of $G$ - which elegantly draws a parallel with the classical Fourier analysis, as
well as the SGT framework for directed graphs \cite{sevi2019}.
  

\subsubsection{Frequency Analysis of the directed Graph Fourier Transform}

H. Sevi, G. Rilling, P. Borgnat in \cite{sevi2019} have introduced
several tools to interpret the frequencies of the Graph Fourier
Transform. 

Throughout this section, we mainly focus on the Fourier Basis
$\Xi_{1/2}$ and, unless stated otherwise, we drop the $1/2$
index to simplify notations. If $\alpha \neq 1/2$, we lose the direct link between the SVD and the eigendecomposition that gives intuitive energetic interpretations to the eigenvectors of the Fourier basis.

\paragraph{Smoothness of signals on graphs:}
Graph signal smoothness, or regularity, is a fundamental notion in SGT: the main objective of graph signal processing is to decompose signals on a basis of waves with different frequencies, i.e. of varying regularity. The higher the frequency of a wave, the lower its smoothness. We refer to \cite{sevi2019} for more details on how to evaluate the smoothness of graph signals, and especially for the notions of \textit{Rayleigh quotient} and \textit{Dirichlet energy}.

\paragraph{Frequency Analysis on directed graphs:}
One major contribution of \cite{sevi2019} was the introduction of a
meaningful frequency interpretation that links the directed graph
Fourier Basis to the Rayleigh Quotient values. Here we introduce the
relation between the two notions:

\begin{prop}[Rayleigh Quotients for the Fourier Basis vectors \cite{sevi2019}]\label{prop:frequencies_rayleigh}
  Let $\Xi_{1/2} = (\xi_1, \hdots, \xi_n)$ be a Fourier basis of
  $\hat{P}$, associated with the eigenvalues
  $(\lambda_1, \hdots, \lambda_n)$ of $\hat{P}$. One has the
  relation:

\begin{equation}
    \forall i \in [\![1, n]\!], \ \mathcal{R}_{\pi, \hat{P}}(\xi_i) = 1 - \lambda_i
\end{equation}

This equation yields an explicit relation between the smoothness of an
eigenvector $\xi_i$ of the Fourier Basis, and its corresponding
eigenvalue.

One can also write that for every $\xi_i \in \Xi_{1/2}$ there exists a
$\rho_i$, eigenvalue of $\mathcal{L}^{rw}$ such that:
\begin{equation}
    \mathcal{R}_{\pi, \hat{P}}(\xi_i) = \rho_i
\end{equation}

This relation links the random walk Laplacian eigenvalues to the
Rayleigh Quotients of the Fourier Basis vectors - extending the intuitive link between Fourier basis vectors and energy, stemming from classical Signal processing.

\end{prop}

Let us recall here that the eigenvalues of $\hat{P}$ and
$\mathcal{L}^{rw}$ are real numbers, because the eigenvalues of a self-adjoint operator are real (\cite{hoffman_kunze_1962}, 2nd ed., p.~312).

One then has to define a framework that respects the directedness of the graph while incorporating the theory introduced in
\cite{shuman_narang_frossard_ortega_vandergheynst_2013,
  ricaud_borgnat_tremblay_goncalves_vandergheynst_2019} for undirected
graphs. Proposition \ref{prop:frequencies_rayleigh} is very important for understanding the intuition behind SGT: the smoothness of a given eigenvector $\xi_i$ of the Fourier basis is directly related to the random walk eigenvalue associated with $\xi_i$. Hence, drawing a parallel with the classical Fourier Analysis framework, the eigenvalues of the Graph Laplacian can be interpreted as frequencies, since the eigenvectors of the Fourier Basis associated with large eigenvalues vary rapidly (their \textit{Rayleigh quotient} is high).

However, one must note that in our case the eigenvalues of the Laplacian are not necessarily distinct. This situation does not arise in the classical setting, since the eigenfunctions of the Laplacian are the functions $e^{iyx}$, which always correspond to different eigenvalues.

\paragraph{Graph Kernels:} \label{par:graph_kernel}

The paper \cite{shuman_narang_frossard_ortega_vandergheynst_2013} uses the
notion of Graph Kernels. Given a Graph Fourier Basis $\Xi_\alpha$ of
$\hat{P}_\alpha$, a graph Kernel is a vector defined directly in the
Graph Fourier space.

One important example of a Graph Kernel is the signal $\hat{\phi} = (e^{-\lambda_1 t}, e^{-\lambda_2 t}, \hdots, e^{-\lambda_n t})$, $t \in \mathbb{R^+}$, where the $\lambda_i$ are the eigenvalues of $\hat{P}_\alpha$.
$\hat{\phi}$ is called the \textit{heat Kernel}, in direct reference to the Heat equation; we will denote the \textit{heat Kernel} by $\mathbb{H}$.

Hence, given a graph $G=(V,E)$, a direct transposition of the heat
equation to the directed graph setting is:
$\partial_{t} \phi = - \mathcal{L}^{rw} \phi$. Let $\Xi_\alpha$ be a
Fourier Basis of $G$; a solution of this equation, given an initial
condition $\phi(i,0), \forall i \in V$ and $t \in \mathbb{R}^+$, is
given by
$\phi(i, t) = (\Xi_\alpha \times (e^{-\lambda_1 t}, e^{-\lambda_2 t},
\hdots, e^{-\lambda_n t})^T)_i \ \phi(i, 0)$.

One can then understand the origin of the Heat Kernel: the solution
of the Heat equation is the inverse Fourier Transform of the Heat
Kernel, multiplied by the initial condition.
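
Continuing the conventions of the earlier sketch, a minimal \texttt{numpy} illustration of this solution, assuming $\mathcal{L}^{rw} = Id - \hat{P}$ (consistent with Proposition \ref{prop:frequencies_rayleigh}):

\begin{lstlisting}[language=Python]
import numpy as np

# Same 3-node regular chain as in the earlier sketch.
P = np.array([[0.1, 0.9, 0.0],
              [0.4, 0.1, 0.5],
              [0.5, 0.5, 0.0]])
w, V = np.linalg.eig(P.T)
pi = np.real(V[:, np.argmax(np.real(w))])
pi /= pi.sum()
Pstar = (pi[None, :] * P.T) / pi[:, None]
lam, Xi = np.linalg.eig(0.5 * (P + Pstar))

# Heat equation d_t phi = -L phi with L = Id - P_hat, so the
# solution is phi(t) = Xi diag(exp(-rho_i t)) Xi^{-1} phi(0).
t = 2.0
rho = 1.0 - lam                       # Laplacian eigenvalues
phi0 = np.array([1.0, 0.0, 0.0])      # heat concentrated on node 0
phi_t = Xi @ (np.exp(-rho * t) * np.linalg.solve(Xi, phi0))
print(np.real_if_close(phi_t))        # heat has diffused over the graph
\end{lstlisting}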



\section{Graph Fourier Transform Quantum Algorithms}
\label{sec:quantumGFT}

\textit{Throughout this section, we assume given an input directed weighted graph $\mathcal{G}=(V,E,W(E))$ and its associated transition matrix $P$.}

In this section, we introduce a quantum algorithm, based on the Quantum Singular Value Transformation, that may be used to prepare a
quantum state reproducing a wide range of graph signal filters and inverse Graph Fourier Transforms. The generalized PageRank theory introduced in section
\ref{sec:SGTapplicationToPR} is derived as a particular application
of our algorithm, which has a vast range
of applications in SGT \cite{chung_1997,
  ricaud_borgnat_tremblay_goncalves_vandergheynst_2019,
  shuman_narang_frossard_ortega_vandergheynst_2013, sevi2019}.
    
\subsection{Graph Fourier eigenbasis filtering based on Quantum SVT.}

\subsubsection{Elements of Singular Value Transformation}

\paragraph{Singular Value Transformation:}

In \cite{gilyen_su_low_wiebe_2019}, A. Gilyén et al.\ define the
Singular Value Transformation as follows:

\begin{definition}[Singular Value Transformation]
  Let $P \in \mathbb{C}[X]$ be a polynomial and $A$ an operator, whose
  SVD is $A = W\Sigma V^\dagger$.

  Then the $P$-Singular Value Transformation of $A$ is the
  transformation:

\begin{equation}
        P^{(SV)}(A) := \left\{ \begin{array}{cc}
            WP(\Sigma)V^\dagger & \mbox{if P is odd} \\
            VP(\Sigma)V^\dagger & \mbox{if P is even}
        \end{array} \right.
\end{equation}

\end{definition}

This definition of Singular Value Transformation, relies on the
notion of \textit{projected unitary encodings}:

\begin{definition}[Projected unitary encodings/Block encodings
  \cite{gilyen_su_low_wiebe_2019}]

  Let $\widetilde{\Pi}$ and $\Pi$ be orthogonal projectors, $U$ a unitary
  and $A$ any real operator, we say that $(\widetilde{\Pi}, \Pi, U)$
  form a projected unitary encoding of $A$ \textit{iff} :
\begin{equation}
    A = \widetilde{\Pi} U \Pi
\end{equation}

From now on, to avoid confusion with the limit probability
distribution $\Pi$, we choose to use the letter $R$ to represent
projected unitary encodings, modifying the notations used in
\cite{gilyen_su_low_wiebe_2019}.

In the next section, we use a particular case of projected unitary
encoding: block encodings.

Suppose $A$ is an $s$-qubit operator, $\alpha, \epsilon \in \mathbb{R}_+$
and $a \in \mathbb{N}$. We say that the $(s+a)$-qubit unitary $U$ is an
$(\alpha, a, \epsilon)$-block encoding of $A$ if:
\begin{equation}
    \|A - \alpha(\bra{0}^{\otimes a} \otimes I) U (\ket{0}^{\otimes a} \otimes I) \| \leq \epsilon
\end{equation}

In other words, the top left block of the matrix $U$ is an
$\nicefrac{\epsilon}{\alpha}$-close approximation of
$\nicefrac{A}{\alpha}$.

\end{definition}
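
As a purely numerical illustration of this definition (a sketch only: QSVT constructions build $U$ at the gate level), the following snippet assembles a $(1, 1, 0)$-block encoding of an arbitrary contraction $A$ through the standard unitary dilation, and checks that the top-left block of $U$ recovers $A$:

\begin{lstlisting}[language=Python]
import numpy as np
from scipy.linalg import sqrtm

A = np.array([[0.3, 0.1],
              [0.2, 0.4]])           # any contraction, ||A|| <= 1
I2 = np.eye(2)

# Unitary dilation: U = [[A, sqrt(I - A A^T)], [sqrt(I - A^T A), -A^T]].
U = np.block([[A,                         sqrtm(I2 - A @ A.T).real],
              [sqrtm(I2 - A.T @ A).real,  -A.T]])

assert np.allclose(U @ U.T, np.eye(4))   # U is unitary
# The projectors |0><0| (x) I pick out the top-left block: alpha = 1,
# one ancilla qubit, zero encoding error.
assert np.allclose(U[:2, :2], A)
\end{lstlisting}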

These projected unitary encodings bridge between quantum unitary operators ($U$) and classical operators ($A$): if $(\widetilde{R}, R, U)$ forms a projected unitary encoding of $A$, quantum circuits act on the unitary operator $U$, and their effect is reflected on $A$ as a Singular Value Transformation via the orthogonal
projections $\widetilde{R}$ and $R$. In other words, given a degree-$d$
odd polynomial $P$, bounded in absolute value by 1 on the interval $[-1, 1]$, the singular value transformation performs the operation:

\begin{equation}
    A = \widetilde{R} U R \rightarrow P^{SV}(A) = \widetilde{R} U_{\phi} R
\end{equation}

where the unitary $U_\phi$ can be implemented with a circuit using $U$
and its inverse $d$ times. In the case where $P$ is even, the same
result holds, replacing $\widetilde{R}$ by $R$.

These are powerful results: in
\cite{gilyen_su_low_wiebe_2019}, the authors propose several use cases of their Quantum Singular Value Transformation, featuring near-optimal complexity for every algorithm. Application fields include
Quantum Walks, Matrix Arithmetics, Quantum Machine Learning and Quantum Language Processing. Our paper uses this Singular Value Transformation to provide a powerful framework for implementing a wide
range of inverse graph Fourier Transforms in one of the very first quantum Spectral Graph Theory algorithms. As a direct application, we show in section \ref{sec:SGTapplicationToPR} how to derive a Quantum Graph Fourier Transform based PageRank algorithm.

\subsubsection{On Graph Frequency-Based inverse Fourier Transform}

Our algorithm solves the following problem:

\paragraph{Input:}\label{par:algo_1_inputs} Let $C$ be the regular Markov Chain on the Graph $\mathcal{G}$ with transition
matrix $P$, and let $(u_1, \hdots, u_n)$ be its Fourier Basis,
associated with the eigenvalues $(\lambda_1, \hdots, \lambda_n)$. Let
$h: \mathbb{R} \rightarrow \mathbb{R}$ be a continuous
function. For every $\epsilon > 0$, $h$ admits an $\epsilon$-close
approximation by a polynomial $Q$ of degree $d(\epsilon)$,
such that $\forall x \in [-1,1], \|Q(x)\|< \nicefrac{1}{2}$. Let
$h(\lambda_i)_{i\in [1, n]}$ be the graph Kernel defined by
$h(\lambda_i)_{i\in [1, n]}=(h(\lambda_1), \hdots, h(\lambda_n))$.

\paragraph{Output:} The quantum state $\ket{\psi_o}$ representing the
inverse graph Fourier transform of $h(\lambda_i)_{i\in [1, n]}$, as
defined in \ref{def:directed_graph_fourier_transform}:

\begin{equation}
    \ket{\psi_o} = \sum_{i \in [1,n]} \Tilde{h}(\lambda_i) \ket{u_i} 
\end{equation}

where $\|\Tilde{h} - h\|_{[0,1]} < \epsilon$.

\paragraph{On the necessity for a QRAM structure:}
All these methods assume efficient access to a quantum memory (a QRAM for instance) able to represent classical data in a quantum state.\label{rmq:QRAM_assumptions}
The paper \cite{giovannetti_lloyd_maccone_2008} introduced a Bucket Brigade QRAM
structure which can provide and store classical information
in $\mathcal{O}(polylog(n))$ complexity. However, this structure does not seem to provide sufficient stability to guarantee
constant-error predictions for sub-linear complexity quantum algorithms (see
\cite{arunachalam_gheorghiu_jochym-oconnor_mosca_srinivasan_2015} for
more detailed explanations). Other data structures have been introduced since the Bucket Brigade version, see
\cite{park_petruccione_rhee_2019} for instance.

One must emphasize that the complexity results depend heavily on the regularity of the function $h$: indeed, the time complexity of the algorithm depends directly on the degree of a polynomial approximation of $h$. We
propose a detailed analysis of the complexity of our quantum algorithm in \ref{subsubsec:algo_1_complexity}.

\subsubsection{Quantum SVT major steps.}
Here are the major steps of our Graph Fourier Transform computational method. We are given a regular and reversible transition matrix $P$,
and define $\mathcal{P} := \Pi^{1/2} P \Pi^{-1/2}$, where $\Pi$ is the
probability limit distribution of $P$. $\mathcal{P}$ is decomposed as
$\mathcal{P}= U \Sigma U^T$ using SVD: indeed, since $P$ is reversible,
$\mathcal{P}$ is symmetric and the SVD of $\mathcal{P}$ is exactly its
eigendecomposition.
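
This symmetry is easy to check numerically. The following sketch (NumPy; it assumes the convention $P_{ij} = \mathbb{P}(i \rightarrow j)$ and uses the random walk on an arbitrary weighted undirected graph as a toy reversible chain) verifies that $\mathcal{P}$ is symmetric and that its singular values are the absolute values of its eigenvalues:

\begin{lstlisting}[language=Python]
import numpy as np

# Random walk on a weighted undirected graph: reversible, pi ~ degrees.
A = np.array([[0., 2., 1.],
              [2., 0., 3.],
              [1., 3., 0.]])
deg = A.sum(axis=1)
P = A / deg[:, None]
pi = deg / deg.sum()

calP = np.diag(np.sqrt(pi)) @ P @ np.diag(1.0 / np.sqrt(pi))

assert np.allclose(calP, calP.T)     # reversibility makes calP symmetric

# Hence SVD and eigendecomposition coincide (up to signs of eigenvalues).
sing = np.linalg.svd(calP, compute_uv=False)
eig = np.linalg.eigvalsh(calP)
assert np.allclose(np.sort(np.abs(eig)), np.sort(sing))
\end{lstlisting}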

\textbf{Step 1:} We suppose we are given $2b = 2 \log_2(N)$ initial
qubits in the state $\ket{0}_A \ket{0}_B$, so that the registers $A$ and
$B$ each contain exactly $b$ qubits. Prepare the initial
quantum state
$\ket{\psi_P}=\sum \mathcal{P}_{i,j} \ket{i}_A \ket{j}_B =
\sum_{k=1}^n \sigma_k \ket{u_k}_A \ket{v_k}_B$, in the same way as proposed in
\cite{lin_bao_zhang_li_wang_2019} and
\cite{duan_yuan_liu_li_2018}. The preparation of this initial state can be performed by addressing a QRAM from which one can retrieve the values of
$\mathcal{P}_{i,j}$.


\textbf{Step 2:} Let $(\ket{0}\bra{0} \otimes Id, \ket{0}\bra{0} \otimes Id, U)$
be a unitary encoding of $\mathcal{P}$ such that
$$(\ket{0}\bra{0} \otimes Id) U (\ket{0}\bra{0} \otimes Id)
= \begin{pmatrix} \mathcal{P} & 0 \\ 0 & 0 \end{pmatrix}$$ 
This step
consists in combining the results of \cite{gilyen_su_low_wiebe_2019}
to build circuits producing the polynomial transformations
$T_i^{SV}(\mathcal{P}) = \widetilde{R} U_{\phi_i} R$. One can then
use the block matrix arithmetic results (Lemma 53 of
\cite{gilyen_su_low_wiebe_2019}) to build successions of block
encoding unitaries: since $T(\mathcal{P}) = U T(\Sigma) U^T$,
applying two successive polynomials $T_1$ and $T_2$ to $\mathcal{P}$
amounts to applying the product $T_1 \times T_2$ directly to
$\mathcal{P}$. Here, one only needs the product of two block
encoding unitaries: the first one encoding a polynomial approximation
of the inverse function on $[-1,1]$, the second one encoding any
polynomial $Q$ of degree $d$ bounded by $1/2$.

\begin{itemize}
\item \textbf{First unitary: inverse function approximation}. Since
  the matrix $\mathcal{P}$ is non-singular, there exists a $\delta>0$
  such that
  $\forall x \in [-\delta, \delta], x \not\in Sp(\mathcal{P})$, where
  $Sp(\mathcal{P})$ is the spectrum of $\mathcal{P}$. Then, according
  to \textit{Corollary 69} of \cite{gilyen_su_low_wiebe_2019},
  there is an odd polynomial $L \in \mathbb{R}[X]$ of degree
  $\mathcal{O}(\nicefrac{1}{\delta} \log(\nicefrac{1}{\epsilon}))$ that
  $\epsilon$-approximates the function
  $f(x) = \frac{3\delta}{4 x}$ on the domain
  $I = [-1, 1] \setminus [-\delta,
  \delta]$; \cite{gilyen_su_low_wiebe_2019} gives a method to build
  the polynomial $L$ recursively. Then, using Quantum Singular Value
  Transformation, one can build the operator $U_{\phi_1}$ such that
  $$(\ket{0}\bra{0} \otimes I)U_{\phi_1}(\ket{0}\bra{0} \otimes I)
  = \begin{pmatrix} L^{(SV)}(\mathcal{P}) & 0 \\ 0 & 0 \end{pmatrix}$$
\item \textbf{Second unitary: $h$ function approximation}. We apply
  the Hermitian Quantum Singular Value Transformation (\textit{Theorem 56}) of
  \cite{gilyen_su_low_wiebe_2019} to build the operator $U_{\phi_2}$ so
  that
  $$(\ket{0}\bra{0} \otimes I)U_{\phi_2} (\ket{0}\bra{0} \otimes I)
  = \begin{pmatrix} Q^{(SV)}(\mathcal{P}) & 0 \\ 0 & 0 \end{pmatrix}$$
    
\item \textbf{Patching up: multiplication of the previous encodings.}
  The multiplication theorem of \cite{gilyen_su_low_wiebe_2019}
  provides a final unitary encoding $\widehat{U}$ of
  $Q(\mathcal{P}) L(\mathcal{P})$.
    
\end{itemize}

\textbf{Step 3:} Apply $\widetilde{R}\widehat{U} R$ to the register
$B$. The system state is now
$\ket{\psi_3} = \sum_i Q(\lambda_i) \ket{u_i}\ket{u_i}$.

\textbf{Step 4:} Uncompute the register $B$. This can be done without
any external ancilla qubit, by applying CNOT gates to every pair of corresponding qubits from registers $A$ and $B$. After this operation, the register $B$ is in the pure quantum state $\ket{0}_B$ and can be discarded after measurement.

\textbf{Step 5:} The final quantum system state is
$\ket{\psi_f} = \sum_i Q(\lambda_i) \ket{u_i}$.
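
Classically, the net effect of Steps 1 to 5 can be emulated in a few lines. The sketch below (NumPy; the toy chain is the one used above, and a heat-type Kernel stands in for the bounded polynomial $Q$, glossing over the polynomial approximation constants) computes the amplitudes of $\ket{\psi_f}$ up to normalization:

\begin{lstlisting}[language=Python]
import numpy as np

A = np.array([[0., 2., 1.],
              [2., 0., 3.],
              [1., 3., 0.]])
deg = A.sum(axis=1); P = A / deg[:, None]; pi = deg / deg.sum()
calP = np.diag(np.sqrt(pi)) @ P @ np.diag(1.0 / np.sqrt(pi))

lam, U = np.linalg.eigh(calP)            # Fourier Basis u_i of calP

t = 3.0
Q = lambda x: 0.5 * np.exp(-t * (1.0 - x))   # bounded by 1/2 on [-1, 1]

# Steps 1-5 collapse classically to: amplitude Q(lambda_i) on |u_i>.
psi_f = U @ Q(lam)                       # sum_i Q(lambda_i) |u_i>
psi_f /= np.linalg.norm(psi_f)           # physical (normalized) state
print(np.round(psi_f, 4))
\end{lstlisting}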

\subsubsection{Detailed explanation of the main steps of the algorithm.}\label{subsubsec:algo_1_explanation}

This algorithm relies on the following property: for
normal semi-definite matrices, the eigendecomposition and the SVD
coincide. Reversible transition matrices $P$ are self-adjoint
operators in the space $l^2(V, \pi)$; hence the operator
$\mathcal{P} = \Pi^{1/2} P \Pi^{-1/2}$ is a self-adjoint operator in the
classical $l^2(V)$ space, equipped with the standard inner product -
\textit{i.e.} $\mathcal{P}^* = \mathcal{P}^T = \mathcal{P}$. The whole framework developed in section 
\ref{sec:SGTIntro} then directly applies to the SVD of $\mathcal{P}$,
which explains why one can simply identify the singular values with the
eigenvalues, and the singular vectors with the eigenvectors, of the matrix
$\mathcal{P}$.

Our algorithm also applies to non-reversible transition matrices;
however, one loses some of the properties introduced in section \ref{sec:SGTIntro}:
we discuss the non-reversible case in
\ref{subsubsec:algo_1_extensions}.

For \textbf{Step 2}, the polynomial $T$ can represent a wide range of
functions. We give several examples of Kernel functions
$h$ in \ref{subsubsec:algo_1_extensions}.

In our SVT algorithm, we explicitly build a unitary encoding
$(\widetilde{R}, R, U)$ of $\mathcal{P}$, inspired by the construction proposed in
\textit{Corollary 34} of \cite{gilyen_su_low_wiebe_2019}, which stems from Szegedy's construction of
Quantum Walk Operators (see \cite{szegedy}). Indeed, the unitary
$U = U_L^\dagger U_R$ is built from the two state preparation
unitaries $U_L$ and $U_R$:

\begin{align}
    U_R : \ket{0} \ket{x} &\rightarrow \sum_{y \in V} \sqrt{P_{xy}} \ket{x} \ket{y} \\
    U_L : \ket{0} \ket{y} &\rightarrow \sum_{x \in V} \sqrt{P^*_{yx}} \ket{x} \ket{y}
\end{align}

This implies, choosing $R = \widetilde{R} = (\ket{0}\bra{0} \otimes I)$, that $\widetilde{R}UR = \begin{pmatrix} \mathcal{P} & 0 \\ 0 & * \end{pmatrix}$.

Recall that $P^*_{x,y} := P_{y,x} \frac{\pi(y)}{\pi(x)}$, which allows one to recover $\mathcal{P}$ from $(\sqrt{P_{xy}P^*_{yx}})_{x,y \in [1,n]}$.
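
The following sketch (NumPy, reusing the toy reversible chain above) checks this identity entrywise, \textit{i.e.} that $\sqrt{P_{xy}P^*_{yx}} = \mathcal{P}_{xy}$:

\begin{lstlisting}[language=Python]
import numpy as np

A = np.array([[0., 2., 1.],
              [2., 0., 3.],
              [1., 3., 0.]])
deg = A.sum(axis=1); P = A / deg[:, None]; pi = deg / deg.sum()

P_star = P.T * pi[None, :] / pi[:, None]   # P*_{xy} = P_{yx} pi(y) / pi(x)
calP = np.diag(np.sqrt(pi)) @ P @ np.diag(1.0 / np.sqrt(pi))

assert np.allclose(np.sqrt(P * P_star.T), calP)
\end{lstlisting}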

Applying the above operators to the register $B$ and a $b$-qubit ancilla register $C$ yields:

\begin{align}
    \sum_{k\in [1,n]} \sigma_k \ket{u_k}_A \widetilde{R}U_\phi R (\ket{v_k}_B \ket{0}_C) &= \sum_{k\in [1,n]} \sigma_k \ket{u_k}_A  \Big(\sum_{i \in [1,n]} T(\sigma_i) \ket{u_i}\braket{v_i|v_k}_B\Big) \ket{\zeta}_C \\
    &= \sum_{k\in [1,n]} \sigma_k T(\sigma_k) \ket{u_k}_A  \ket{u_k}_B \ket{\zeta}_C \\
    &= \sum_{k\in [1,n]} Q(\sigma_k) \ket{u_k}_A  \ket{u_k}_B \ket{\zeta}_C \\
    &= \sum_{k\in [1,n]} Q(\sigma_k) \ket{u_k}_A  \ket{u_k}_B
\end{align}

Recall that the states $\ket{v_i}$ form an orthonormal basis,
which gives the second equality in the previous set of equations.

Since the register $C$ is unentangled from the register $B$, due to the diagonal
block-form of the operator $\widetilde{R}U_\phi R$, one can uncompute
it without modifying the global state of the system.

\paragraph{Circuit implementation of the algorithm:}
Figure \ref{fig:quantum_circuit_algo_1} presents a 
quantum circuit implementation of the algorithm introduced in the previous paragraph.
\begin{figure}
    \centering
    $\begin{array}{c}
    \Qcircuit {
    \lstick{C : \ket{0}_{n_b}} & {/} \qw &\qw &  \multigate{1}{\widetilde{R}U_\phi R} & \meter & \qw & \qw \\
    \lstick{B : \ket{0}_{n_b}} & {/} \qw & \multigate{1}{QRAM}  & \ghost{\widetilde{R}U_\phi R} &  \targ & \meter & \rstick{\ket{0}} \cw \\
    \lstick{A : \ket{0}_{n_b}} & {/} \qw & \ghost{QRAM} & \qw  & \ctrl{-1} &   \rstick{\sum_i \Tilde{h}(\lambda_i) \ket{u_i}} \qw \\
    & & \dstick{\ket{0}_A\ket{0}_B \rightarrow \sum_{ij} P_{i,j} \ket{i}_A \ket{j}_B} \cwx & & & \\
    & & & & & &}       
    \end{array}$
    
    \bigskip
    \caption{The Quantum Circuit used by the Graph Fourier eigenbasis filtering}
    \label{fig:quantum_circuit_algo_1}
\end{figure}

\subsubsection{Complexity analysis.}\label{subsubsec:algo_1_complexity}

Our algorithm being the combination of three macroscopic quantum processes, one has to estimate the gate and memory cost of each
process, then sum the individual costs to obtain the global
computational cost.\\

\paragraph{Cost of QRAM state preparation:}
As noted in \ref{rmq:QRAM_assumptions}, several authors have
introduced Quantum Random Access Memory structures able to
prepare any loaded state in time $\mathcal{O}(polylog(n))$, with a low
degree of polynomial scaling.\\

\noindent\textbf{\textit{Cost of $\widetilde{R}U_\phi R$ preparation:}}\\
Let $(\widetilde{R}, R, U)$ be a unitary encoding of a real
operator $P$. According to the results of \cite{gilyen_su_low_wiebe_2019}
(\textit{Corollary 18 \& Lemma 19}), given a real odd polynomial of
degree $d$, bounded by 1 on $[0,1]$, one can build $U_\phi$ - after
$\mathcal{O}(poly(d, \log(\frac{1}{\epsilon})))$ preprocessing
computations on a classical computer - using, $d$ times:
\begin{itemize}
    \item the operators $U$ and $U^\dagger$,
    \item the gates $C_R NOT$ and $C_{\widetilde{R}} NOT$,
    \item single qubit gates.
\end{itemize}
The process uses a constant number of ancilla qubits, which makes the overall number of gates required scale as $\mathcal{O}(d \mathcal{T}(U))$,
with $\mathcal{T}(U)$ the cost of implementing the $C_R NOT$ and $U$ gates
\cite{gilyen_su_low_wiebe_2019}.\\

\noindent\textbf{\textit{Cost of the last $C_R NOT$ preparation:}}\\
The last $C_R NOT$ operator requires $n_b$ 2-qubit $CNOT$s, which makes an overall of $\mathcal{O}(\log(n))$ quantum gates.\\

\paragraph{Patching up:}
By combining all the previous costs, one deduces that the overall quantum gate cost of the circuit implementation
\ref{fig:quantum_circuit_algo_1} scales as
$\mathcal{O}(d \mathcal{T}(U) + S(\log(n)))$, with $S$ a low-degree
polynomial. This result shows a direct dependency between the number of quantum gates required and the degree of the simulated polynomial, and explains why our algorithm is efficient in practice: in general, Fourier Kernels are well approximated by low-degree polynomials.
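
This dependency can be probed numerically. The sketch below (using NumPy's Chebyshev module; the time parameter and the tolerance are arbitrary illustrations) finds the smallest even degree at which a Heat-type Kernel is uniformly approximated on $[-1,1]$:

\begin{lstlisting}[language=Python]
import numpy as np
from numpy.polynomial import chebyshev as C

t = 5.0
h = lambda x: np.exp(-t * (1.0 - x))     # Heat-type Kernel on [-1, 1]
x = np.linspace(-1.0, 1.0, 2001)

for d in range(2, 31, 2):
    coeffs = C.chebinterpolate(h, d)     # degree-d Chebyshev interpolant
    err = np.max(np.abs(C.chebval(x, coeffs) - h(x)))
    if err < 1e-6:
        print(f"degree {d} reaches uniform error {err:.2e}")
        break
\end{lstlisting}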

\subsection{Algorithm Discussions}\label{subsubsec:algo_1_extensions}

We have derived several interesting and general results from a careful exploitation of the Quantum Singular Value Transformation algorithm introduced by A. Gilyén et \textit{al.} in
\cite{gilyen_su_low_wiebe_2019}. In this part, we analyse several extensions of this algorithm that may be of interest, while giving examples of Kernels efficiently implemented within the algorithm.

\subsubsection{Undirected graph version:}
Even though we have only worked with directed graphs, our Quantum Graph Frequency-Based Inverse Fourier Transform transposes well to the undirected framework. Indeed, random walks on undirected graphs are always reversible (and the associated adjacency matrices symmetric), which means that the reversibility condition on
the input may be dropped in the undirected setting. Several Spectral Graph Theory applications only make use of undirected graphs
\cite{ortega_frossard_kovacevic_moura_vandergheynst_2018,
  shuman_narang_frossard_ortega_vandergheynst_2013,
  ricaud_borgnat_tremblay_goncalves_vandergheynst_2019} and our
algorithm may be of great use in those cases. Please note also that
these undirected graphs do not need to be regular: our
results also apply to undirected non-regular graphs (whereas the power
method for PageRank does not converge for these graphs).

\subsubsection{Non reversible graphs:}
As discussed before, our algorithm is adapted to regular
non-reversible graphs; however, in this case, one loses the direct correspondence between the singular values and the eigenvalues of the transition matrix, due to its lack of symmetry. Indeed, the operator
$\mathcal{P} = \Pi^{-1/2} P \Pi^{1/2}$ is no longer self-adjoint, and one can only claim that the singular values of $\mathcal{P}$ are the
square roots of the eigenvalues of
$\mathcal{P}\mathcal{P}^* = \Pi^{-1/2} P \Pi P^* \Pi^{-1/2}$. In addition, the polynomials used for singular value transformation must be either even or odd (see \cite{gilyen_su_low_wiebe_2019}).

\subsubsection{Spectral Graph signal filtering application:}
Another interesting use case of the algorithm corresponds to a slightly modified version of our original problem. Given
$\mathcal{P} = \Pi^{1/2} P \Pi^{-1/2}$, where $P$ is a regular and
reversible transition matrix, and assuming that one can produce an initial quantum superposition $\ket{\psi}_i = \sum_k c_k \ket{u_k}$ of eigenvectors $\ket{u_k}$ of
$\mathcal{P}$, then for every
polynomial $Q$ bounded by 1 on $[-1,1]$, the algorithm presented above can produce the output quantum state:

\begin{equation}
    \ket{\psi}_f = \sum_k c_k Q(\lambda_k) \ket{u_k} 
\end{equation}
    
where $\lambda_k$ is the eigenvalue associated with $\ket{u_k}$.

Thus, if $h$ is an analytic function bounded by 1 on $[-1,1]$
and $\epsilon$-approximated by a polynomial $Q_\epsilon$ on $[-1,1]$, one
can build the following state, with precision $\epsilon$ on the values of
$h(\lambda_k)$:

\begin{equation}
    \ket{\psi}_f = \sum_k c_k h(\lambda_k) \ket{u_k} 
\end{equation}

Results can also be derived for complex graph filters
(according to Corollary 8 of \cite{gilyen_su_low_wiebe_2019}),
provided that the norm of the graph filter is bounded by
$\nicefrac{1}{4}$ on $[-1,1]$. A consequence is that, given any initial graph signal $\phi$ (which can be written as a quantum superposition of the Fourier Basis eigenvectors of $\mathcal{P}$), one can apply the linear filter $h(\lambda_1, \hdots, \lambda_n)$ to its Graph Fourier components, where $h$ is any real function that is $\epsilon$-approximated by a given family of polynomials and bounded by 1 on $[-1,1]$.

Signal filtering is ubiquitous in Spectral Graph Theory applications
\cite{sandryhaila_moura_2014,
  shuman_narang_frossard_ortega_vandergheynst_2013, sevi2019,
  ortega_frossard_kovacevic_moura_vandergheynst_2018}, and our
algorithm may be used to implement a wide range of such linear filters
on Graph Fourier Components, the only condition being
that these filters must be bounded by 1 on $[-1,1]$. Note that any
continuous function can be made bounded by 1 on $[-1,1]$ by dividing
it by its maximum absolute value on $[-1,1]$ (which exists since the function
is continuous).
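
As an illustration, the classical counterpart of such a filtering operation takes only a few lines. The sketch below (NumPy; the toy graph, the signal and the low-pass filter are arbitrary) applies a Kernel $h$ to the Graph Fourier components of a signal $\phi$:

\begin{lstlisting}[language=Python]
import numpy as np

A = np.array([[0., 2., 1.],
              [2., 0., 3.],
              [1., 3., 0.]])
deg = A.sum(axis=1); P = A / deg[:, None]; pi = deg / deg.sum()
calP = np.diag(np.sqrt(pi)) @ P @ np.diag(1.0 / np.sqrt(pi))
lam, U = np.linalg.eigh(calP)

# Low-pass filter: keeps components whose eigenvalue is close to 1
# (low graph frequencies); bounded by 1 on [-1, 1].
h = lambda x: np.exp(-2.0 * (1.0 - x))

phi = np.array([1.0, -1.0, 0.5])         # arbitrary graph signal
phi_hat = U.T @ phi                      # graph Fourier transform
phi_filtered = U @ (h(lam) * phi_hat)    # filtered inverse transform
print(np.round(phi_filtered, 4))
\end{lstlisting}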


\section{Spectral Graph Theory Quantum PageRank Algorithm.}\label{sec:SGTapplicationToPR}

\textit{Throughout this section, we will assume given an input
  directed weighted graph $\mathcal{G}=(V,E,W(E))$ and its associated
  transition matrix $P$.}

In this section, we introduce a generalized version of the PageRank using Spectral Graph Theory. This generalized PageRank can be seen as a detailed application of the Quantum Algorithm (inverse graph Fourier transform) of \ref{sec:quantumGFT}.

Using results stemming from Spectral Graph Theory, we derive a generalization of the PageRank algorithm that better translates local graph properties into the node ranking produced by the algorithm. This work is the first of its kind: to our knowledge, a complete adaptation of
spectral graph theory to the PageRank has not yet been proposed. Our algorithm can adjust the influence of local and global graph structures on the final ranking, and provides a whole new class of importance criteria that can be used to produce new node rankings for any graph.
 
\subsection{Generalized Importance Vector}
In this subsection, we make precise the notion of ranking by \textit{order of importance} and generalize it, providing a new class of \textit{Generalised PageRank rankings}.

\begin{definition}[Generalized importance
  vector]\label{def:generalized_importance_vector}
  Let $\lambda_1 \leq \lambda_2 \leq ... \leq \lambda_n$ be the eigenvalues of the Laplacian associated with a given transition matrix $P$ of the input graph and $(u_1, ..., u_n)$ a basis of unit eigenvectors associated to these eigenvalues.

  Given a non-increasing Graph Kernel $\mathbb{K}$ (see
  \ref{par:graph_kernel}), \textit{i.e.} such that
\begin{equation}
    \forall (i,j) \in V^2, \ i \leq j \implies \mathbb{K}_i \geq \mathbb{K}_j,
\end{equation}
the $\mathbb{K}$-generalized importance vector is the inverse Fourier Transform of $\mathbb{K}$.

\end{definition}

Here are some remarks regarding the previous definition:
\begin{itemize}
\item Most of the time, the transition matrix $P$ will be chosen to be either the classical or the personalized Google Matrix, since these matrices are regular and easily computable. However, other candidates also yield interesting properties: we give some examples below and discuss their usefulness in our framework.

\item A first useful example of a generalized importance vector is given by the Kernel $\delta^0$, where
  $\delta^0_{i} = \delta_{i \in \mathbb{J}}$,
  $\mathbb{J} = \{ i \in V \mbox{ st: } \lambda_i = 0 \}$ and
  $\lambda_i$ is the $i^{th}$ eigenvalue of the Laplacian of the
  Google Matrix. In the case of the Google Matrix, this amounts to computing the classical PageRank, that is, the probability limit distribution of $\mathcal{G}$. Thus, we call the Kernel $\delta^0$ the classical PageRank Kernel.
    
\item An interesting choice of Kernel is the Heat Kernel
  $\mathbb{H} = (e^{-\lambda_1 t}, e^{-\lambda_2 t}, \hdots,
  e^{-\lambda_n t})$, $t \in \mathbb{R}^+$. This Kernel models the propagation of heat on the graph: through the parameter $t$, one chooses the duration of the heat propagation, and can thereby carefully
  adjust the locality of the ranking. Note that the work of
  \cite{yang_king_lyu_2007} corresponds to the Heat Kernel particular case of our PageRank generalization.
    
\item The approach presented in \cite{baeza-yates_boldi_castillo_2006} can also be treated in our framework. Indeed, the power series decomposition
  $\underset{k\in [|0,\infty|]}{\sum} \gamma_k P^k$ can be rewritten, using the decomposition $P=JUJ^{-1}$:
    
    \begin{align}
        \underset{k\in [|0,\infty|]}{\sum} \gamma_k P^k &= J \Big(\underset{k\in [|0,\infty|]}{\sum} \gamma_k (diag(\lambda_1^k, \hdots, \lambda_n^k) + R^k)\Big) J^{-1} \\ &= J  \Big(diag(f(\lambda_1), \hdots, f(\lambda_n)) + \underset{k\in [|0,r^{max}|]}{\sum} \gamma_k R^k\Big) J^{-1}
    \end{align} 
    where $r^{max} < \infty$ and
    $f : \mathbb{C} \rightarrow \mathbb{C}; z \rightarrow
    \underset{k\in [|0,\infty|]}{\sum} \gamma_k z^k$ is an analytic
    function.
    
\end{itemize}

\subsubsection*{A comment on the Google Matrix}\label{subsec:main_consequences}
The Fourier Transform framework allows us to derive a generalized version of the PageRank algorithm which may better distinguish secondary hubs of nodes. The generalized importance vector allows one to fine-tune the balance between global graph structure and local graph properties.

In fact, this property can be easily understood for the \textit{Heat
  Kernel} $\mathbb{H}$: when the parameter $t$ is close to 0, the
heat has not yet propagated through the whole input graph, leading to a more prominent local description of the input graph; whereas when $t \rightarrow \infty$, one has access to a global description of the properties of the input graph.


To fully exploit the power of Spectral Graph Theory, we have introduced several classes of candidate matrices that can be used to compute the Generalized PageRank: indeed, we have seen in section 
\ref{sec:SGTIntro} that only connectivity and aperiodicity are required to apply the Perron-Frobenius Theorem. Thus the original Google Matrix captures a very specific case of matrix regularization, which favors global graph structure
over local properties by adding a complete graph with low transition weights to the original graph. Similarly, the personalized PageRank's use of the personalization vector refines the ranking around a specific node, but introduces dense matrix rows which may be
complicated to deal with in practice \cite{langville_meyer_2004}.


\subsection{Generalising  the Google Matrix.}
We introduce a generalization of the Google Matrix, and study some
interesting applications of our novel PageRank Matrices - Almost
Directed Stochastic Matrices (ADSM) - while proving that the classical
PageRank Matrix is in fact a very particular instance of ADSM. 

This generalized version of the Google Matrix relies heavily
on Spectral Graph Theory, as it is inspired by the use of the \textit{reversibilized} and \textit{lazy} matrix versions in the transition matrix regularization process. Indeed, these two extended matrix forms are useful to prepare admissible candidates for the PageRank theorem - \textit{i.e.}
regular matrices.

\subsubsection{Patching the dangling nodes}
We will use the following version of the Weakly
Preferential PageRank method (see \ref{def:dangling}):
\begin{equation}
    P = \Bar{P} + \sum_{i\leq C} c_{i} u_{i}^T
\end{equation}

where $C$ is the number of connected components of $\mathcal{G}$,
$c_{i}$ the indicator vector of the dangling nodes of the $i^{th}$ connected component, and $u_i$ a uniform distribution on the $i^{th}$ connected component of the graph.

This way of patching dangling nodes is interesting as it does not link two seemingly unrelated connected components: this is necessary if these
components are to stay distinct in the final Google Matrix representation.

\subsubsection{Almost Directed Stochastic Matrices}
Next, we introduce the notion of Loop Completing Graph Extension that
heavily simplifies the expression of our generalized Google Matrix.

\begin{definition}[Loop Completing Graph Extension]
  Let $G = (V, E, W(E))$ be a directed weighted graph, we call a
  \textbf{loop completing directed weighted graph extension} (or
  simply loop completing graph extension) of G,
  $G^e = (V, E^e, W(E)^e)$, a graph that has the following properties:
\begin{enumerate}
    \item G is a subgraph of $G^e$, \textit{i.e.} $E \subset E^e$
    \item $\forall x \in V$, $(x,x) \in E^e$ : the graph extension has self-looping edges.
    \item $\forall (i,j) \in E^e \setminus E, (j,i) \in E^e$ : the graph extension edges are undirected.
\end{enumerate}

\end{definition}

Then we rely on loop completing graph extensions to define a Generalized PageRank matrix - the \textit{Almost Directed Stochastic Matrices}, or ADSM.

The intuition behind ADSM is the following: given two nodes $x$ and $y$ of any graph $\mathcal{G}$, a Markov chain $C$ on
$\mathcal{G}$, and assuming that the initial probability distribution
of the walkers is uniform (\textit{i.e.}
$\forall i \in V, \mathbb{P}(X_0= i)= \frac{1}{|V|}$), we have the
following equalities stemming from Bayes' formula:

\begin{align}\label{eq:ADSM_intuition}
    P^{T_{stoch}}(y,x) := \mathbb{P}(X_0 = y | X_1 = x) &= \frac{\mathbb{P}(X_1=x | X_0 = y) \mathbb{P}(X_0 = y)}{\sum_{k\in V} \mathbb{P}(X_1 = x | X_0 = k) \mathbb{P}(X_0=k) } \\ 
    &= \frac{\mathbb{P}(X_1=x | X_0 = y)}{\sum_{k\in V} \mathbb{P}(X_1 = x | X_0 = k) } \\
    &= \frac{P_{y,x}}{\sum_{k\leq n} P_{k,x}}
\end{align}

Then, one can define a \textit{weak reversibilization} $P^{T_{stoch}}$ of a transition matrix using the last equation. This weak reversibilization unveils interesting results when the walkers are initially uniformly distributed.

Equation \ref{eq:ADSM_intuition} shows a locally tempered behaviour for the reversed edges: the
more connected the node $y$, the lower $\mathbb{P}(X_1=x|X_0=y)$
and the higher $\mathbb{P}(X_1=y)$ will be, so reversed edges penalize highly connected nodes, producing a fairer ranking. Thus, an interpretation of weak and strong reversibilization is the following: the weak reversibilization satisfies the power equilibrium principle in the immediate neighbourhood of each node, whereas the strong reversibilization satisfies a global power equilibrium
principle, thanks to the limit probability distribution matrix $\pi$.

These matrices are called ``almost directed'' because the weight attributed to the artificial reversed edges is small enough that the probability of the random walker taking one of these edges is quite low (even though not equal to zero):

\begin{definition}[Almost Directed Stochastic Matrices]\label{def:ADSM1}
Let $P$ be a left stochastic matrix and $G^{gen}$ the weight matrix of a loop completing graph extension of the input graph. We define:
\begin{equation}
    \mathcal{P} = (1-\alpha) P + \alpha (G^{gen})^{T_{stoch}}
\end{equation}
with

$(G^{gen})^{T_{stoch}} = (G^{gen})^{T} \boxtimes \frac{1}{G^{gen}_1}$, 

where $(\frac{1}{G^{gen}_1})_i = \left\{
    \begin{array}{ll}
        \frac{1}{\sum_{j\leq n} G^{gen}_{ji}} & \mbox{if $i$ has incoming edges} \\
        0 & \mbox{ otherwise}
\end{array} \right.$ 

is a column vector and $\boxtimes$ is the elementwise product (the $i^{th}$ row of $(G^{gen})^{T}$ is multiplied by $(\frac{1}{G^{gen}_1})_i$). $\alpha$ is a (usually small) constant.

The \textit{Almost Directed Stochastic Matrix} $\mathcal{P}$ is \textit{regular}, which means that one can apply the Perron-Frobenius theorem and compute a probability limit distribution.   

\end{definition}

\begin{proof}[Proof:] Let $P$ be the stochastic matrix under consideration; then, by a permutation of the basis, $P$ takes the form:
\begin{equation}
P = \begin{pmatrix}
P_1 & 0 & \cdots & 0\\
0 & P_2 & \cdots & 0 \\
0 & 0 & \ddots & 0\\
0 & 0 & 0 & P_n
\end{pmatrix}
\end{equation}
where $P_i$ is a stochastic matrix representing a strongly connected directed graph. Then
$\mathcal{P} = \begin{pmatrix} \mathcal{P}_1 & 0 & \cdots & 0\\
  0 & \mathcal{P}_2 & \cdots & 0 \\
  0 & 0 & \ddots & 0\\
  0 & 0 & 0 & \mathcal{P}_n
\end{pmatrix}$ where $\mathcal{P}_i$ is a regular stochastic matrix. The result holds.
\end{proof}

We introduce the \textit{Weak Stochastic
  Transposition} operator $(\cdot)^{T_{stoch}}: P \rightarrow P^{T_{stoch}}$,
which maps the set of left-stochastic matrices onto itself. This operator acts on any stochastic matrix (not solely the regular ones) and outputs a transition matrix with reversed edges, using only the original transition matrix. It is called \textit{Weak} because it does not bear the usual properties of transposition (linearity for instance), contrary to the reversibilization operator $(\cdot)^*$, which displays a behaviour similar to transposition. Thus, one can regard the $(\cdot)^*$ operator as a \textit{Strong Stochastic Transposition} that only acts on regular left-stochastic matrices.

The \textit{weak stochastic transposition} can be computed efficiently by caching the sums $\frac{1}{G^{gen}_1}$ in memory and updating them incrementally instead of recomputing them.
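
A classical sketch of these constructions is given below (NumPy; the function names and the default value of $\alpha$ are illustrative assumptions, and the stochasticity conventions follow the displayed definition \ref{def:ADSM1} literally):

\begin{lstlisting}[language=Python]
import numpy as np

def weak_stochastic_transposition(G):
    # (G)^{T_stoch}: the i-th row of G^T is scaled by 1 / (i-th column
    # sum of G), with the zero convention for nodes without incoming edges.
    col_sums = G.sum(axis=0)
    with np.errstate(divide="ignore"):
        inv = np.where(col_sums > 0.0, 1.0 / col_sums, 0.0)
    return G.T * inv[:, None]

def adsm(P, G_gen, alpha=0.05):
    # ADSM: (1 - alpha) P + alpha (G_gen)^{T_stoch}. Caching col_sums and
    # updating it when edges change keeps the construction cheap, as noted.
    return (1.0 - alpha) * P + alpha * weak_stochastic_transposition(G_gen)
\end{lstlisting}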

Figure \ref{fig:ADSM_construction} provides examples of ADSM graph construction.


\begin{figure}
\centering
\begin{subfigure}{0.45\textwidth}
\centering
 \resizebox{\linewidth}{!}{\begin{tikzpicture}[->,>=stealth',shorten >=5pt,auto,node distance=3.5cm,
                    semithick]
  \tikzstyle{every state}=[fill=red,draw=none,text=white]
  
  \node[state] (A)                    {$a$};
  \node[state]         (B) [above right of=A] {$b$};
  \node[state]         (D) [below right of=A] {$d$};
  \node[state]         (C) [below right of=B] {$c$};


  \path (A) edge              node[anchor = north, below] {0.5} (B)
            edge [loop left, blue]  node {0.33} (A)
            edge              node {0.25} (C)
        (B) edge    [bend right=80]          node[above] {0.33} (A)
            edge              node {0.25} (C)
            edge [loop above, blue]  node {0.5} (B)
        
        (C)  edge [loop right, blue]  node {0.25} (C)
        
        (D) edge            node[above] {0.25} (C)
            edge              node {0.33} (A)
            edge [loop below, blue]  node {1} (D);
\end{tikzpicture}}
\caption{Initial graph: we add self-looping edges to make the graph aperiodic}
\end{subfigure}%
\hfill
\begin{subfigure}{0.45\textwidth}
\centering
 \resizebox{\linewidth}{!}{\begin{tikzpicture}[->,>=stealth',shorten >=5pt,auto,node distance=3.5cm,
                    semithick]
  \tikzstyle{every state}=[fill=red,draw=none,text=white]
  
  \node[state] (A)                    {$a$};
  \node[state]         (B) [above right of=A] {$b$};
  \node[state]         (D) [below right of=A] {$d$};
  \node[state]         (C) [below right of=B] {$c$};


  \path (A) edge              node[anchor = north, below, text = purple] {0.471} (B)
            edge [loop left, blue, text=purple]  node {0.326} (A)
            edge    node {0.2125} (C)
            edge [bend right = 80, red, dashed] node[below] {0.035} (D)
            
        (B) edge    [bend right=80, text = purple]          node[above] {0.350} (A)
            edge              node[below] {0.2125} (C)
            edge [loop above, blue, text=purple]  node {0.494} (B)
        
        (C)  edge [loop right, blue]  node {0.3625} (C)
             edge [bend right = 80, red, dashed] node {0.035} (B)
             edge [bend left = 30, red, dashed] node[below] {0.035} (A)
             edge [bend left = 80, red, dashed] node {0.026} (D)
             
        (D) edge node[below] {0.2125} (C)
            edge [text=purple]              node {0.281} (A)
            edge [loop below, blue, text = purple]  node {0.939} (D);
\end{tikzpicture}}
\caption{Final Almost Directed Stochastic Matrix: we added multiple reversed edges.}
\end{subfigure}
\caption{A construction example for Almost Directed Stochastic Matrices}
\label{fig:ADSM_construction}
\end{figure}

Besides, the notion of \textit{Almost Directed Stochastic Matrices} using a \textit{Loop Completing Graph Extension} can also be seen as a generalisation of the classical PageRank: the Google Matrix is the particular
case in which $\mathcal{G}^e$ is a complete graph.

Hence, the introduction of almost directed stochastic matrices based on the completing graph extension is relevant: depending on the shape of the completing graph extension, the heat flow through the graph changes, and the local properties of the graph are translated differently into the final result.

\subsection{Results and analysis}
\label{sec:results}

To better grasp our PageRank algorithm extensions, we have produced several graph rankings on a classical
computer. Here we present results for various input graphs. In these numerical applications, we have used the \textit{reversed ADSM} - the \textit{reversibilized version} $\frac{\mathcal{P} + \mathcal{P^*}}{2}$ of the ADSM - to comply with the definition of the directed graph Fourier transform in \ref{sec:SGTIntro}.
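
For reference, a minimal sketch of the reversibilization used in these experiments is given below (NumPy; it assumes a regular row-stochastic ADSM $\mathcal{P}$ and extracts the limit distribution $\pi$ as its left Perron eigenvector):

\begin{lstlisting}[language=Python]
import numpy as np

def reversed_adsm(calP):
    # Limit distribution pi: left Perron eigenvector of calP
    # (eigenvector of calP^T for the eigenvalue 1).
    w, V = np.linalg.eig(calP.T)
    pi = np.real(V[:, np.argmax(np.real(w))])
    pi = pi / pi.sum()
    # Strong reversibilization: calP*_{xy} = calP_{yx} pi(y) / pi(x).
    calP_star = calP.T * pi[None, :] / pi[:, None]
    return 0.5 * (calP + calP_star)
\end{lstlisting}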

Figures \ref{fig:simpleHeatK2} and \ref{fig:simpleHeatK3}
compare the importance distributions of the Classical PageRank method and of a Heat Kernel method (parameter $t=5$).
The numerical data used to build these plots are provided in figures \ref{fig:ranking2} and \ref{fig:ranking3}. In figure \ref{fig:simpleheatKComparison}, one
can note that the importance vectors produced are fairly close to the standard classical PageRank importance vectors - the greater the time, the more damped the Kernel distribution, and the closer the Importance vector gets to
the stationary distribution. One can remark that for lower $t$ parameters ($t=5$ and below), the importance
vector unveils secondary hubs more clearly - as shown by the greater oscillations of the Generalized Importance Vector associated with these Kernels. These oscillations can be clearly seen in figures
\ref{fig:complexHeatK1} and \ref{fig:complexHeatKComparison}, where the Heat Kernel based Importance vectors attribute a higher score to nodes of secondary importance (\textit{i.e.} nodes ranked after the fifth position for these graphs) when compared to the classical PageRank.

Figure \ref{fig:complexHeatKComparison} shows that this effect is even more accentuated for Heat Kernels with low time parameters ($t\leq 5$). This secondary hub highlighting appears clearly for sufficiently large graphs (50 nodes and more); for very small graphs, several boundary effects may affect the global ranking. Yet, this secondary node enhancement can be clearly seen in figures \ref{fig:simpleHeatK2} and \ref{fig:simpleHeatK3}: in figure \ref{fig:simpleHeatK2} the effect is clear for nodes 0, 3, 4, 5, 6 and 9, and in figure \ref{fig:simpleHeatK3} nodes 0, 3, 4 and 10 exhibit an enhanced importance score when compared to the classical PageRank. This results in a more balanced importance distribution for the graphs associated with the Heat Kernels in figures \ref{fig:simpleHeatK2} and \ref{fig:simpleHeatK3}.


\begin{figure}[H]
    \centering
    \begin{subfigure}{0.45\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/simple2_hk5_1.png}
        \caption{Generalized Importance Vector}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.45\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/simple2_hk5_classical.png}
        \caption{Classical PageRank}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.65\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/simple2_hk5_heat.png}
        \caption{Heat Kernel t=5, Reversed ADSM}
    \end{subfigure}
    
    \caption{Generalized PageRank Simulation Using a heat Kernel of parameter t=5 for the Reversibilized version of the ADSM}
    \label{fig:simpleHeatK2}
\end{figure}


\begin{figure}[H]
    \centering
    \begin{subfigure}{0.45\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/simple3_hk5_1.png}
        \caption{Generalized Importance Vector}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.45\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/simple3_hk5_classical.png}
        \caption{Classical PageRank}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.65\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/simple3_hk5_heat.png}
        \caption{Heat Kernel t=5, Reversed ADSM}
    \end{subfigure}
    
    \caption{Generalized PageRank Simulation Using a heat Kernel of parameter t=5 for the Reversibilized version of the ADSM of a simple graph}
    \label{fig:simpleHeatK3}
\end{figure}

\begin{figure}[H]
    \centering
    \begin{subfigure}{0.7\textwidth}
        \includegraphics[width=1\textwidth]{results_figure_simple/simple_hk579_1.png}
        \caption{Generalized Importance Vector}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.7\textwidth}
        \includegraphics[width=1\textwidth]{results_figure_simple/simple_hk579_2.png}
        \caption{Kernel distribution for generalized PageRank}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.47\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/simple_hk579_class.png}
        \caption{Classical PageRank}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.47\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/simple_hk579_heat9.png}
        \caption{Heat Kernel t=9, Reversed ADSM}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.47\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/simple_hk579_heat7.png}
        \caption{Heat Kernel t=7, Reversed ADSM}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.47\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/simple_hk579_heat5.png}
        \caption{Heat Kernel t=5, Reversed ADSM}
    \end{subfigure}
    
    \caption{Generalized PageRank Simulation Using different heat Kernels for the Reversibilized version of the ADSM of a complex graph}
    \label{fig:simpleheatKComparison}
\end{figure}

\begin{figure}[!ht]
    \centering
    \begin{subfigure}{0.7\textwidth}
        \includegraphics[width=1\textwidth]{results_figure_simple/complex_hk_135_1.png}
        \caption{Generalized Importance Vector}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.7\textwidth}
        \includegraphics[width=1\textwidth]{results_figure_simple/complex_hk_135_2.png}
        \caption{Kernel distribution for generalized PageRank}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.47\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/complex_hk_135_class.png}
        \caption{Classical PageRank}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.47\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/complex_hk_135_heat5.png}
        \caption{Heat Kernel t=5, Reversed ADSM}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.47\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/complex_hk_135_heat3.png}
        \caption{Heat Kernel t=3, Reversed ADSM}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.47\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/complex_hk_135_heat1.png}
        \caption{Heat Kernel t=1, Reversed ADSM}
    \end{subfigure}
    
    \caption{Generalized PageRank Simulation Using different heat Kernels for the Reversibilized version of the ADSM}
    \label{fig:complexHeatKComparison}
\end{figure}

Several interesting Kernels can be used to prepare less common rankings; we give some examples of such Kernels in the figures presented in Appendix A:
figure \ref{fig:cosK5} (cosine Kernel), figure \ref{fig:monoK5} (monomial
Kernel) and figure \ref{fig:invK5} (inverse Kernel). The cosine Kernel, $(cos(\lambda_i \nicefrac{\pi}{4})^t)_i$ with $t=5$ in figure \ref{fig:cosK5}, provides a considerably different ranking where external nodes achieve significantly higher rankings, to the detriment of central nodes, which seem to be penalized by this importance distribution. The inverse Kernel behaviour seems highly sensitive to local graph structure: in figure \ref{fig:invK5}, this is highlighted by the fact that the right part of the importance distribution graph absorbs most of the importance score for the inverse Kernel, whereas this phenomenon does not appear in the classical PageRank. Finally, the monomial Kernel, illustrated in figure \ref{fig:monoK5}, provides a more balanced ranking than the other Kernels for the same parameter: the difference between the most important nodes and the secondary ones is less visible for this kind of importance distribution.

Figure \ref{fig:benchmark} provides a first benchmark of the
convergence of the power method applied to ADSM. One notes that the number of iterations required to reach a given precision $\epsilon$ remains constant as the number of nodes of randomly generated Barabasi-Albert graphs increases. Adding this observation to the fact that ADSM are sparse
matrices, computing the reversibilized version of an ADSM can be done in $\mathcal{O}(nnz(P))$ operations, with $P$ the transition matrix of the input graph - which is at least as fast as the classical PageRank.
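
A sketch of the benchmark loop is given below (NumPy; the routine assumes a column-stochastic matrix, so that the stationary distribution is a fixed point of $x \rightarrow Px$, and the precision threshold matches figure \ref{fig:benchmark}). Running it over Barabasi-Albert graphs of increasing size - generated with networkx, for instance - should reproduce the trend reported above:

\begin{lstlisting}[language=Python]
import numpy as np

def power_method_iterations(P, eps=1e-7, max_iter=10_000):
    # Iterate x -> P x from the uniform distribution and count the
    # iterations needed for the l1 change to drop below eps.
    n = P.shape[0]
    x = np.full(n, 1.0 / n)
    for k in range(1, max_iter + 1):
        x_new = P @ x
        x_new /= x_new.sum()
        if np.abs(x_new - x).sum() < eps:
            return k
        x = x_new
    return max_iter
\end{lstlisting}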


\begin{figure}[h!]
\centering
\csvreader[separator = semicolon, head to column names, 
tabular=|c|c|c|c|c|, before reading = \sisetup{
round-mode = places,
round-precision = 3
},
table head=\hline \multicolumn{1}{|p{2cm}|}{\centering Ranking} & \multicolumn{1}{p{2cm}|}{\centering Node \newline Classical \newline Kernel} & \multicolumn{1}{p{2cm}|}{\centering Score \newline Classical \newline Kernel} & \multicolumn{1}{p{2.5cm}|}{\centering Node \newline Heat Kernel \newline Time = 5} & \multicolumn{1}{p{2cm}|}{\centering Score  \newline Heat Kernel \newline Time = 5},
late after head=\\\hline\hline,
late after line=\csvifoddrow{\\\hline}{\\\hline}]
{results_csv/rankings_simple2_hk5.csv} {Ranking=\rank, Node Number Classical PageRank= \nnClass, Score Classical PageRank = \scoreClass, {Node Number Heat Kernel t = 5} =\nnHk, {Score Heat Kernel t = 5} =\scoreHk} {\rank & \nnClass & \tablenum{\scoreClass} & \nnHk & \tablenum{\scoreHk}}

    \caption{Ranking comparison between the classical PageRank and the generalized PageRank using the Heat Kernel of parameter t=5. Data from figure \ref{fig:simpleHeatK2}}
    \label{fig:ranking2}
\end{figure}

\begin{figure}[h!]
\centering
\csvreader[separator = semicolon, head to column names, 
tabular=|c|c|c|c|c|, before reading = \sisetup{
round-mode = places,
round-precision = 3
},
table head=\hline \multicolumn{1}{|p{2cm}|}{\centering Ranking} & \multicolumn{1}{p{2cm}|}{\centering Node \newline Classical \newline Kernel} & \multicolumn{1}{p{2cm}|}{\centering Score \newline Classical \newline Kernel} & \multicolumn{1}{p{2.5cm}|}{\centering Node \newline Heat Kernel \newline Time = 5} & \multicolumn{1}{p{2cm}|}{\centering Score  \newline Heat Kernel \newline Time = 5},
late after head=\\\hline\hline,
late after line=\csvifoddrow{\\\hline}{\\\hline}]
{results_csv/rankings_simple3_hk5.csv} {Ranking=\rank, Node Number Classical PageRank= \nnClass, Score Classical PageRank = \scoreClass, {Node Number Heat Kernel t = 5} =\nnHk, {Score Heat Kernel t = 5} =\scoreHk} {\rank & \nnClass & \tablenum{\scoreClass} & \nnHk & \tablenum{\scoreHk}}

    \caption{Ranking comparison between the classical Pagerank and the generalized pagerank using the Heat Kernel of Parameter t=5. Data from figure \ref{fig:simpleHeatK3}}
    \label{fig:ranking3}
\end{figure}
\newpage

\begin{figure}[H]
    \centering
    \begin{subfigure}{0.45\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/complex1_hk_5_1.png}
        \caption{Generalized Importance Vector}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.45\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/complex1_hk_5_class.png}
        \caption{Classical PageRank}
    \end{subfigure}
    \hfill
    \begin{subfigure}{0.65\textwidth}
        \includegraphics[width=1.1\textwidth]{results_figure_simple/complex1_hk_5_heat.png}
        \caption{Heat Kernel t=5, Reversed ADSM}
    \end{subfigure}
    
    \caption{Generalized PageRank Heat Kernel comparison with the Classical PageRank for a complex graph. Parameter t = 5}
    \label{fig:complexHeatK1}
\end{figure}

\begin{figure}[H]
    \centering
    \begin{tabular}{|c|c|}
    \hline
        \textbf{Number of nodes} & \textbf{Number of iterations}\\
        \hline \hline
        100 & 21 \\
        \hline
        215 & 22 \\
\hline 464 & 22 \\
\hline 1000 & 22 \\
\hline 2154 & 21 \\
\hline 4641 & 20 \\
\hline 10000 & 20 \\
\hline 21544 & 18 \\
\hline 46415 & 22 \\
\hline 100000 & 17\\
\hline
    \end{tabular}
    \caption{Results of convergence benchmark for the ADSM. Precision $10^{-7}$, graph type : Barabasi-Albert, parameter $5$}
    \label{fig:benchmark}
\end{figure}


\subsection{Generalized PageRank as an application to Quantum Inverse Graph Fourier Transform}
In the PageRank framework of definition
\ref{def:generalized_importance_vector}, the algorithm proposed in section \ref{sec:quantumGFT} can be
used directly, as it can simulate any $\epsilon$-close polynomial approximation of a function of the transition matrix eigenvalues, for any reversible Markov Chain.

A possible use case applied to the PageRank problem could be the following. After computing a classical ranking with the ADSM structure introduced in section \ref{sec:SGTapplicationToPR}, in a number of operations scaling linearly with the size $N$ of the input matrix, one can use the limit probability distribution vector to build a reversible transition matrix (in time $\mathcal{O}(N)$, due to the sparsity of ADSM) and apply the algorithm to build a finer ranking. Amplitude estimation routines can then be applied to the final quantum state to get refined rankings for hardly distinguishable sets of nodes. Since ADSM preserve strongly connected components, our quantum PageRank algorithm may be applied directly to a given connected component, to produce a final quantum state representing the nodes of this particular connected component.

\subsection{Comparison with Paparo's discrete PageRank.} \label{par:paparo_comp} One can note
several similarities between our Generalized Quantum PageRank and the
one introduced by G. Paparo \textit{et al.} in 2012 in
\cite{paparo_martin-delgado_2012}. The authors derive the following expression for the instantaneous PageRank score:

\begin{equation} \label{eq:instantaneous_pr}
    I_q(P_i, m) = \| \sum_{\mu_{i, \pm}} \mu_{i, \pm}^{2m} \bra{i}_2 \ket{\mu_{i, \pm}} \braket{\mu_{i, \pm} | \psi (0)} \|^2
\end{equation}

where $\ket{\psi(0)}$ is an initial quantum state, $\mu_{i, \pm}$ are the eigenvalues of the Quantum Walk step operator that lie in the dynamical subspace (see \ref{sec:previousResults}), and
$\ket{\mu_{i, \pm}}$ the associated eigenvectors. Given the classical Google Matrix $G$, denote by $D$ the discriminant matrix defined by $D_{ij} = \sqrt{G_{ij} G_{ji}}$, by $\lambda_i$ its eigenvalues and by $\ket{\lambda_i}$ the associated eigenvectors. Paparo \textit{et al.} then derive the following set of relations (valid on the dynamical subspace, where the two-step walk operator $U^2$ acts non-trivially):
\begin{align}
    \mu_{j, \pm} &= \exp(\pm i \arccos(\lambda_j)) \\
    \ket{\mu_{i, \pm}} &= \ket{\widetilde{\lambda}_i} - \mu_{i, \pm} S \ket{\widetilde{\lambda}_i}
\end{align}

where $\ket{\widetilde{\lambda}_i} = A \ket{\lambda_i}$, with $A = \sum_{j=1}^N \ket{\psi_j} \bra{j}$ an operator from the space of vertices, $\mathbb{C}^N$, to the space of edges, $\mathbb{C}^N \otimes \mathbb{C}^N$. The state $\ket{\psi_j}$ was defined in \ref{sec:previousResults}.

First note that one can apply our algorithm directly, using a unitary encoding of $U_{dyn}$ (the walk operator restricted to the dynamical subspace): since $U_{dyn}$ is unitary, its unitary encoding is itself. One can then use the singular value transformation to apply the polynomial $R_m=X^{2m}$ to the eigenspaces of $U_{dyn}$, which yields the final state:

\begin{equation}
    \ket{\psi_f} = \sum_{\mu_{i, \pm}} \mu_{i, \pm}^{2m} \ket{\mu_{i, \pm}} \braket{\mu_{i, \pm}|\psi_{0}}
\end{equation}

This exactly reproduces the expression of the instantaneous PageRank introduced above in \ref{eq:instantaneous_pr}. Indeed, recall that since $U_{dyn}$ is unitary, $U_{dyn}$ is normal and its left and right singular vectors coincide, which allows us to derive the above equation.

Note that, in equation \ref{eq:instantaneous_pr}, $\bra{i}_2$ is applied only to the last $n$ qubits of the space of edges $\mathbb{C}^N \otimes \mathbb{C}^N$; indeed, $U_{dyn}$ is a $2N \times 2N$ matrix (see \cite{paparo_martin-delgado_2012}). This feature is important in \cite{paparo_martin-delgado_2012}: it may explain some of the quantum effects observed on the final ranking, due to the non-trivial superposition of eigenstates represented by the sum in equation \ref{eq:instantaneous_pr}.

Computing the discrete quantum PageRank as Paparo \textit{et al.} proposed in \cite{paparo_martin-delgado_2012} becomes possible on a quantum computer, provided that one can prepare the initial state $\ket{\psi_0} = \sum_i U_{dyn} \ket{i}\ket{j}$ efficiently. One would have to repeat the measurements of each instantaneous PageRank instance by successively applying the singular value transformation with the polynomials $R_m = X^{2m}$. However, let us point out that the eigenvalues of the dynamical subspace are of the form $e^{i\arccos(\lambda_i)}$, which becomes $e^{2mi\arccos(\lambda_i)}$ when exponentiated using $R_m$.

Let us push the analogy with our method a bit further, to understand the origin of the quantum superposition that arises from the use of the walk operator $U_{dyn}$. One has:

\begin{align}
    \ket{\psi_f} &= \sum_{i, \pm} \mu_{i, \pm}^{2m}(Id - S \mu_{i, \pm}) \ket{\widetilde{\lambda_i}} \braket{\mu_{i, \pm}| \psi(0)} \\
    &= \sum_{i, \pm} \mu_{i, \pm}^{2m} \ket{\widetilde{\lambda_i}} \braket{\widetilde{\lambda_i}| \psi(0)} - S \sum_{i, \pm} \mu_{i, \pm}^{2m+1} \ket{\widetilde{\lambda_i}} \braket{\mu_{i, \pm} |\psi(0)}
\end{align}

Let us choose the initial state $\ket{\psi(0)} = C \sum_{i} \ket{\widetilde{\lambda_i}}$, where $C$ is a normalization constant that we will drop to simplify the notation. Then:

\begin{align}
    \ket{\psi_f} &= \sum_{i, \pm}(\mu_{i,\pm}^{2m} - \lambda_i \mu_{i, \pm}^{2m+1}) \ket{\widetilde{\lambda_i}} - S \sum_{i, \pm} (\mu_{i, \pm}^{2m+1} - \lambda_i \mu_{i, \pm}^{2m+2}) \ket{\widetilde{\lambda_i}} \\
    &= 2\sum_i (T_{2m}(\lambda_i) - \lambda_i T_{2m+1}(\lambda_i)) \ket{\widetilde{\lambda_i}} - 2 S \sum_i (T_{2m+1} (\lambda_i) - \lambda_i T_{2m+2}(\lambda_i)) \ket{\widetilde{\lambda_i}} \\
    &\propto \sum_i ( \lambda_i T_{2m+1}(\lambda_i) - T_{2m}(\lambda_i)) \ket{\widetilde{\lambda_i}} - U \sum_i (\lambda_i T_{2m+2}(\lambda_i) - T_{2m+1} (\lambda_i)) \ket{\widetilde{\lambda_i}}  \label{eq:final_state_paparo}
\end{align}

In the last line, we have dropped the two normalisation constants and used the fact that $U\ket{\widetilde{\lambda_i}} = S\ket{\widetilde{\lambda_i}}$ (see \cite{paparo_martin-delgado_2012}). 

$T_k$ is the $k^{th}$ Chebyshev Polynomial of the first kind. We use the following relation to derive the second equality:

\begin{equation}
    T_k(\lambda_i) = \frac{1}{2}\left( (\lambda_i + \sqrt{\lambda_i^2 -1})^k + (\lambda_i - \sqrt{\lambda_i^2 -1})^k\right) = \frac{1}{2} (\mu_{i, +}^k + \mu_{i, -}^k)
\end{equation}
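
This relation is immediate from $\mu_{i,\pm} = e^{\pm i \arccos(\lambda_i)}$ and can be checked numerically (a NumPy sketch with random values of $\lambda$ in $[-1,1]$):

\begin{lstlisting}[language=Python]
import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(-1.0, 1.0, size=10)      # stand-ins for the lambda_i

# mu_pm = x +- sqrt(x^2 - 1) = exp(+-i arccos x) for |x| <= 1.
mu_p = x + 1j * np.sqrt(1.0 - x**2)
mu_m = x - 1j * np.sqrt(1.0 - x**2)

for k in range(8):
    cheb = np.cos(k * np.arccos(x))      # T_k(x) on [-1, 1]
    assert np.allclose(0.5 * (mu_p**k + mu_m**k), cheb)
\end{lstlisting}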

Equation \ref{eq:final_state_paparo} is a linear combination of two states that can be built using our Spectral Graph Theory quantum algorithm: 

$\sum_i (\lambda_i T_{2m+1}(\lambda_i) - T_{2m}(\lambda_i) ) \ket{\widetilde{\lambda_i}}$ and $ U \sum_i (\lambda_i T_{2m+2}(\lambda_i) - T_{2m+1} (\lambda_i) ) \ket{\widetilde{\lambda_i}}$.

The authors of \cite{gilyen_su_low_wiebe_2019} have provided an efficient method to emulate the polynomial $T_{2m}$ within the singular value transformation, together with linear combination theorems for unitary block encodings. This allows us to prepare the two former states efficiently in combination with our quantum algorithm. In this very special case, the amplitudes add up and create the quantum superposition effect exploited by Paparo \textit{et al.} to compute the instantaneous PageRanks: one can see that the frequencies of the first term are affected by the higher frequencies of the second term. The second term is related to the first one as it is a rotation (by the unitary $U$) of a higher-degree filter of the eigenspaces of the discriminant matrix. In other words, in the Spectral Graph Theory framework, the instantaneous PageRank is derived from the interference of two Kernel-generated graph signal states, one of which is rotated by the unitary $U$.

This discussion paves the way to a generalized quantum based PageRank model, which adapts the Fourier Transform based generalized PageRank approach to the quantum domain. 

\begin{definition}[Quantum Fourier Transform based PageRank]
Let $G=(V,E)$ be a directed graph, $(\lambda_1, \hdots, \lambda_n)$ the eigenvalues associated with a Fourier Basis of $G$, and $\mathbb{H}=(h_1, \hdots, h_n)$ a graph Kernel with associated inverse graph Fourier Transform coefficients $(\widehat{h}_1, \hdots, \widehat{h}_n)$. Let $\ket{\widetilde{\lambda_i}}$ be the vector state defined as above.

The (Instantaneous) Quantum Fourier Transform PageRank may then be defined, as in \cite{paparo_martin-delgado_2012}, by: \\

\begin{equation}
    \mathcal{I}_{\mathbb{H}, PR} = \| \ket{\psi_f} \| = \|\sum_i \widehat{h}_i \ket{\widetilde{\lambda_i}} - U \sum_i \widehat{h}_i \ket{\widetilde{\lambda_i}}\|
\end{equation}
    
Note that, given a sequence of Kernels $(\mathbb{H}_k)_{k \in \mathbb{N}}$, one can define a time-averaged PageRank by:

\begin{equation}
    \forall T \in \mathbb{N}^*, \ \mathcal{P}_{T, \mathbb{H}, PR} = \frac{1}{T} \sum_{i=1}^{T} \mathcal{I}_{\mathbb{H}_i, PR}
\end{equation}

\end{definition}

The study of this generalized quantum PageRank formulation is left as an open research topic.

\section{Conclusion}
Our paper presents three contributions to the field of Quantum Information Processing:

\begin{itemize}

\item First, we introduce a Quantum Circuit based algorithm dealing with a class of problems similar to that of the Adiabatic Quantum algorithm introduced in \cite{garnerone_zanardi_lidar_2012}. Our circuit prepares the state $\ket{\pi}$, $\pi$ being the classical Google Importance vector, by simulating a Hamiltonian for a time $\mathcal{O}(\frac{\log(N)}{\epsilon})$. We have detailed several use cases for this algorithm.
    
\item The main contribution of the paper is the introduction of one of the first quantum algorithms performing a wide range of graph signal filters and Kernel-based signal production systems with a low gate complexity. This algorithm relies heavily on the Quantum Singular Value Transformation, a recent development in Quantum Signal Processing.
    
\item Our last contribution applies the Spectral Graph Theory framework to PageRank theory in order to generalize the scope of
  the original algorithm. To this effect, we introduce a generalized PageRank ranking method based on the Directed Graph Fourier Transform, which produces more pertinent rankings by filtering the spectrum of the Google graph. We then introduce an extension of the Google Matrix that can be used jointly with the generalized PageRank algorithm to fully exploit its versatility. Finally, we provide
  an application example of our Spectral Graph Quantum Algorithm to the generalized PageRank model.
    
\end{itemize}

The work presented here, describing the links between Spectral Graph Theory, PageRank, and Quantum Information Processing, paves the way to an exciting extension of Quantum Signal Processing: the application of quantum algorithms to Spectral Graph Theory. In this direction, one of the most interesting open questions raised here is the further generalization of quantum PageRank algorithms using our generalized PageRank approach.


\subsection*{Contributions}
T. Chapuis-Chkaiban, as the main author, produced most of the results presented in this paper and wrote the first version of the manuscript. Z. Toffano and B. Valiron, as co-authors, made several research suggestions and contributed equally to the scientific content. In addition, they corrected and clarified the manuscript and made several structural improvements for the final submitted version of this paper. 

\subsection*{Competing Interests}
The authors have no competing interests to declare that are relevant to the content of this article.

\subsection*{Data availability}
The datasets generated during and/or analysed during the current study are available in the  repository, [PERSISTENT WEB LINK TO DATASETS]

\newpage
\begin{appendices}

\section{Generalized PageRank using various Kernels.}

\begin{sidewaystable}
\sidewaystablefn%
\begin{center}
\begin{minipage}{\textheight}

\centering
\csvreader[separator = semicolon, head to column names, 
tabular=|c|c|c|c|c|c|c|c|c|, before reading = \sisetup{
round-mode = places,
round-precision = 3
},
table head=\hline \multicolumn{1}{|p{1.5cm}|}{\centering Nodes \newline Ranking} & \multicolumn{1}{p{1.5cm}|}{\centering Node \newline Classical \newline Kernel} & \multicolumn{1}{p{1.5cm}|}{\centering Score \newline Classical \newline Kernel} & \multicolumn{1}{p{1.5cm}|}{\centering Node \newline Heat \newline Kernel \newline Time = 5} & \multicolumn{1}{p{1.5cm}|}{\centering Score  \newline Heat Kernel \newline Time = 5} & \multicolumn{1}{p{1.5cm}|}{\centering Node \newline Heat \newline Kernel \newline Time = 7} & \multicolumn{1}{p{1.5cm}|}{\centering Score  \newline Heat Kernel \newline Time = 7} & \multicolumn{1}{p{1.5cm}|}{\centering Node \newline Heat Kernel \newline Time = 9} & \multicolumn{1}{p{1.5cm}|}{\centering Score  \newline Heat Kernel \newline Time = 9},
late after head=\\\hline\hline,
late after line=\csvifoddrow{\\\hline}{\\\hline}]
{results_csv/rankings_simple1_hk579.csv} {Ranking=\rank, Node Number Classical PageRank= \nnClass, Score Classical PageRank = \scoreClass, {Node Number Heat Kernel t = 5} =\nnHkf, {Score Heat Kernel t = 5} =\scoreHkf, {Node Number Heat Kernel t = 7} =\nnHks, {Score Heat Kernel t = 7} =\scoreHks, {Node Number Heat Kernel t = 9} =\nnHkn, {Score Heat Kernel t = 9} =\scoreHkn} {\rank & \nnClass & \tablenum{\scoreClass} & \nnHkf & \tablenum{\scoreHkf} & \nnHks & \tablenum{\scoreHks} & \nnHkn & \tablenum{\scoreHkn}}

    \captionof{figure}{Ranking comparison between the classical PageRank and the generalized PageRank using the Heat Kernels of parameters $t=5$, $7$, and $9$.}
    \label{fig:noderanking1}


\end{minipage}
\end{center}
\end{sidewaystable}

\begin{figure}[H]
    \centering
    \centerline{
    \includegraphics[width= 1.15\textwidth]{results_figures/complex_cosine2_5.png}
    }
    \caption{Comparison between the generalized PageRank using the Cosine Kernel and the classical PageRank. Parameter: exponent $= 5$.}
    \label{fig:cosK5}
\end{figure}

\begin{figure}[H]
    \centering
    \centerline{
    \includegraphics[width= 1.15\textwidth]{results_figures/complex_inverse_1_5.png}
    }
    \caption{Comparison between the generalized PageRank using the Inverse Kernel and the classical PageRank. Parameter: exponent $= 5$.}
    \label{fig:invK5}
\end{figure}


\begin{figure}[H]
    \centering
    \centerline{
    \includegraphics[width= 1.15\textwidth]{results_figures/complex_monomial2_5.png}
    }
    \caption{Comparison between the generalized PageRank using the Monomial Kernel and the classical PageRank. Parameter: exponent $= 5$.}
    \label{fig:monoK5}
\end{figure}

\csvreader[separator = semicolon, head to column names, 
longtable=|c|c|c|c|c|, before reading = \sisetup{
round-mode = places,
round-precision = 3
},
table head=\hline \multicolumn{1}{|p{2cm}|}{\centering Ranking} & \multicolumn{1}{p{2cm}|}{\centering Node \newline Classical \newline Kernel} & \multicolumn{1}{p{2cm}|}{\centering Score \newline Classical \newline Kernel} & \multicolumn{1}{p{2.5cm}|}{\centering Node \newline Heat Kernel \newline Time = 5} & \multicolumn{1}{p{2cm}|}{\centering Score  \newline Heat Kernel \newline Time = 5},
late after head=\\\hline\hline,
late after line=\csvifoddrow{\\\hline}{\\\hline}]
{results_csv/ranking_complex_hk5_short.csv} {Ranking=\rank, Node Number Classical PageRank= \nnClass, Score Classical PageRank = \scoreClass, {Node Number Heat Kernel t = 5} =\nnHk, {Score Heat Kernel t = 5} =\scoreHk} {\rank & \nnClass & \tablenum{\scoreClass} & \nnHk & \tablenum{\scoreHk}}

    \captionof{figure}{Ranking comparison between the classical PageRank and the generalized PageRank using the Heat Kernel of parameter $t=5$.}
    \label{fig:ranking1}




\end{appendices}

%%===========================================================================================%%
%% If you are submitting to one of the Nature Portfolio journals, using the eJP submission   %%
%% system, please include the references within the manuscript file itself. You may do this  %%
%% by copying the reference list from your .bbl file, paste it into the main manuscript .tex %%
%% file, and delete the associated \verb+\bibliography+ commands.                            %%
%%===========================================================================================%%

\bibliography{references}% common bib file
%% if required, the content of .bbl file can be included here once bbl is generated
%%\input sn-article.bbl

%% Default %%
%%\input sn-sample-bib.tex%

\end{document}

%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End: