@EauDeData
Created November 9, 2024 04:49
File sent from Gephi
This file has been truncated.
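Reading the file: the node records below can be extracted with standard-library Python. A minimal sketch, assuming the full (untruncated) export is saved locally as graph.gexf (a hypothetical filename):

import xml.etree.ElementTree as ET

# GEXF 1.3 namespaces, exactly as declared in the file header below.
NS = {"g": "http://gexf.net/1.3", "viz": "http://gexf.net/1.3/viz"}

tree = ET.parse("graph.gexf")  # hypothetical local copy of the full export
root = tree.getroot()

# Map node-attribute ids (e.g. "pageranks") to their declared titles.
titles = {
    a.get("id"): a.get("title")
    for a in root.findall(".//g:attributes[@class='node']/g:attribute", NS)
}

# Walk every <node>, resolving each <attvalue for="..."> to its title.
for node in root.findall(".//g:node", NS):
    attrs = {
        titles[v.get("for")]: v.get("value")
        for v in node.findall("g:attvalues/g:attvalue", NS)
    }
    print(node.get("id"), attrs.get("Degree"), attrs.get("PageRank"))

Libraries such as networkx also ship a GEXF reader (nx.read_gexf), though support for the GEXF 1.3 namespace may depend on the version installed, so the plain ElementTree route is the conservative choice here.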
<?xml version='1.0' encoding='UTF-8'?>
<gexf xmlns="http://gexf.net/1.3" version="1.3" xmlns:viz="http://gexf.net/1.3/viz" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://gexf.net/1.3 http://gexf.net/1.3/gexf.xsd">
<meta lastmodifieddate="2024-11-09">
<creator>Gephi 0.10.1</creator>
<title></title>
<description></description>
</meta>
<graph defaultedgetype="undirected" mode="static">
<attributes class="node" mode="static">
<attribute id="0" title="type" type="string"/>
<attribute id="1" title="content" type="string"/>
<attribute id="2" title="n_subastracts" type="long"/>
<attribute id="degree" title="Degree" type="integer">
<default>0</default>
</attribute>
<attribute id="eccentricity" title="Eccentricity" type="double">
<default>0.0</default>
</attribute>
<attribute id="closnesscentrality" title="Closeness Centrality" type="double">
<default>0.0</default>
</attribute>
<attribute id="harmonicclosnesscentrality" title="Harmonic Closeness Centrality" type="double">
<default>0.0</default>
</attribute>
<attribute id="betweenesscentrality" title="Betweenness Centrality" type="double">
<default>0.0</default>
</attribute>
<attribute id="modularity_class" title="Modularity Class" type="integer">
<default>0</default>
</attribute>
<attribute id="pageranks" title="PageRank" type="double">
<default>0.0</default>
</attribute>
</attributes>
<attributes class="edge" mode="static">
<attribute id="3" title="relationship" type="string"/>
</attributes>
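<!-- Each <node> below lists <attvalue for="..."> entries keyed to the attribute
     ids declared above; e.g. for="pageranks" carries the node's PageRank score. -->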
<nodes>
<node id="0" label="0">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0013456033322529987"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="4.3393035731448325E-4"/>
</attvalues>
<viz:size value="57.06492"/>
<viz:position x="1625.2963" y="-477.6334"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="1" label="1">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001334364270418749"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.4657312840825654E-4"/>
</attvalues>
<viz:size value="57.655094"/>
<viz:position x="-3492.7168" y="532.13055"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="2" label="2">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="21"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37682403433476397"/>
<attvalue for="harmonicclosnesscentrality" value="0.39362186788156256"/>
<attvalue for="betweenesscentrality" value="0.00440333988861826"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="0.0014900124596239745"/>
</attvalues>
<viz:size value="106.363594"/>
<viz:position x="-2795.0198" y="-9013.449"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="3" label="3">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0018512532050443864"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="5.753012917240277E-4"/>
</attvalues>
<viz:size value="63.66422"/>
<viz:position x="12100.24" y="-6159.679"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="4" label="4">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0018995528717830976"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="5.914456501966707E-4"/>
</attvalues>
<viz:size value="64.41785"/>
<viz:position x="-10911.804" y="-4165.1855"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="5" label="5">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552634"/>
<attvalue for="betweenesscentrality" value="9.111617312072891E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.557573547843128E-4"/>
</attvalues>
<viz:size value="58.08382"/>
<viz:position x="228.81061" y="1180.8315"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="6" label="6">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0018927187182510763"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="6.156853659167135E-4"/>
</attvalues>
<viz:size value="65.54938"/>
<viz:position x="12484.774" y="-5473.0063"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="7" label="7">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="25"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0019155720709043752"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="5.915974428751599E-4"/>
</attvalues>
<viz:size value="64.424934"/>
<viz:position x="14190.377" y="-7699.7075"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="8" label="8">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.513964911508267E-4"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.313791851968895E-4"/>
</attvalues>
<viz:size value="52.27776"/>
<viz:position x="5398.928" y="4505.7266"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="9" label="9">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="23"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.003021843928176282"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="0.0010224973810086925"/>
</attvalues>
<viz:size value="84.53966"/>
<viz:position x="8910.2295" y="-17516.918"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="10" label="10">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.003429231086546365"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="0.0011192062917119022"/>
</attvalues>
<viz:size value="89.05409"/>
<viz:position x="8985.735" y="-18669.016"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="11" label="11">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.002453676698296365"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0010803049820664375"/>
</attvalues>
<viz:size value="87.23816"/>
<viz:position x="-1013.46783" y="-3545.3882"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="12" label="12">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.002162507127754013"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="7.22477253252852E-4"/>
</attvalues>
<viz:size value="70.5345"/>
<viz:position x="-769.4042" y="-6213.906"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="13" label="13">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001823876947457259"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="5.921141299052358E-4"/>
</attvalues>
<viz:size value="64.44905"/>
<viz:position x="4494.482" y="2033.3405"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="14" label="14">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0034172029183326775"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="0.0011334245426802944"/>
</attvalues>
<viz:size value="89.71782"/>
<viz:position x="-2556.0994" y="20131.234"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="15" label="15">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.002546135223719886"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="9.59363701324519E-4"/>
</attvalues>
<viz:size value="81.59253"/>
<viz:position x="3297.2122" y="-3741.0562"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="16" label="16">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0015739204112983384"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="6.351276918881776E-4"/>
</attvalues>
<viz:size value="66.456955"/>
<viz:position x="2337.085" y="-913.79553"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="17" label="17">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0018369213269795012"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="6.106543805663347E-4"/>
</attvalues>
<viz:size value="65.31453"/>
<viz:position x="36.87326" y="8900.403"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="18" label="18">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="21"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.002773618754990283"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="8.601121862907198E-4"/>
</attvalues>
<viz:size value="76.9594"/>
<viz:position x="-10306.16" y="-6419.3003"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="19" label="19">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.002257666413638373"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="6.955222905796345E-4"/>
</attvalues>
<viz:size value="69.27622"/>
<viz:position x="-670.23364" y="15.233297"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="20" label="20">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0012801047840202803"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="4.931660638677714E-4"/>
</attvalues>
<viz:size value="59.830086"/>
<viz:position x="4681.4565" y="-647.4949"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="21" label="21">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0015512223208086048"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.604102332559622E-4"/>
</attvalues>
<viz:size value="67.63717"/>
<viz:position x="9235.666" y="9019.237"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="22" label="22">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.272131354527876E-4"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="3.470255991667876E-4"/>
</attvalues>
<viz:size value="53.008144"/>
<viz:position x="385.22504" y="8307.543"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="23" label="23">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0027021581769767667"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="7.887420349900085E-4"/>
</attvalues>
<viz:size value="73.627785"/>
<viz:position x="3879.3179" y="-5638.9077"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="24" label="24">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001359304292715344"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="4.5133270290975415E-4"/>
</attvalues>
<viz:size value="57.877274"/>
<viz:position x="27.320475" y="-58.192654"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="25" label="25">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013156479764074318"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.539469122542524E-4"/>
</attvalues>
<viz:size value="62.66738"/>
<viz:position x="-976.2353" y="-14601.723"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="26" label="26">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0016139493320476786"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="5.516640299311501E-4"/>
</attvalues>
<viz:size value="62.560814"/>
<viz:position x="8379.446" y="9799.581"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="27" label="27">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0012269035543232888"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="4.987997787163339E-4"/>
</attvalues>
<viz:size value="60.09307"/>
<viz:position x="-1258.4279" y="1361.9801"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="28" label="28">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0022421994784156836"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="7.822874872787612E-4"/>
</attvalues>
<viz:size value="73.32648"/>
<viz:position x="3198.961" y="1542.8711"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="29" label="29">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="2.4782968081418793E-4"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="2.6522645966164666E-4"/>
</attvalues>
<viz:size value="49.1897"/>
<viz:position x="8372.074" y="9520.346"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="30" label="30">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001083856276956102"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="3.458715698278958E-4"/>
</attvalues>
<viz:size value="52.954273"/>
<viz:position x="-2000.3265" y="-1903.6885"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="31" label="31">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001209101370211826"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="4.1656387011697647E-4"/>
</attvalues>
<viz:size value="56.254242"/>
<viz:position x="4391.9067" y="-557.39075"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="32" label="32">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0014510815554060444"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="5.062032277923987E-4"/>
</attvalues>
<viz:size value="60.43867"/>
<viz:position x="6425.3896" y="7410.019"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="33" label="33">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.002104032886792904"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="7.224476730615316E-4"/>
</attvalues>
<viz:size value="70.53312"/>
<viz:position x="-11287.889" y="-4301.474"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="34" label="34">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0023356189778849915"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="6.728155097566127E-4"/>
</attvalues>
<viz:size value="68.21625"/>
<viz:position x="1842.1586" y="4928.936"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="35" label="35">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0012562229410698358"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.46597619894939E-4"/>
</attvalues>
<viz:size value="62.32431"/>
<viz:position x="7390.069" y="5694.6353"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="36" label="36">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.0029978084853252355"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="0.0011658427833234827"/>
</attvalues>
<viz:size value="91.231125"/>
<viz:position x="-6184.852" y="5499.5044"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="37" label="37">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.0034346645921407896"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="0.0011865129155951663"/>
</attvalues>
<viz:size value="92.196014"/>
<viz:position x="-4844.0137" y="3249.192"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="38" label="38">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="25"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37695346041559336"/>
<attvalue for="harmonicclosnesscentrality" value="0.3939255884586316"/>
<attvalue for="betweenesscentrality" value="0.0039834765370175725"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0015018025616763085"/>
</attvalues>
<viz:size value="106.91397"/>
<viz:position x="311.9334" y="4387.9062"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="39" label="39">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001180939707777605"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.431509336922731E-4"/>
</attvalues>
<viz:size value="62.163414"/>
<viz:position x="-1883.8206" y="-14520.941"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="40" label="40">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="26"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.0035167453881694744"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.001144088749806325"/>
</attvalues>
<viz:size value="90.21563"/>
<viz:position x="-2436.3647" y="-10728.002"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="41" label="41">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0010776584112206092"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.271059977067188E-4"/>
</attvalues>
<viz:size value="56.746353"/>
<viz:position x="-1734.3035" y="-12809.715"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="42" label="42">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0011768068915923875"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.57426076130552E-4"/>
</attvalues>
<viz:size value="62.82979"/>
<viz:position x="-1051.4652" y="-13655.329"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="43" label="43">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.002566159085407282"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.001118636968089123"/>
</attvalues>
<viz:size value="89.02753"/>
<viz:position x="-2062.0042" y="-7123.521"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="44" label="44">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="21"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0032919387086194846"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="0.0011778728218934417"/>
</attvalues>
<viz:size value="91.792694"/>
<viz:position x="6905.0845" y="-1762.3033"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="45" label="45">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0011253439062276932"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="4.651665452552538E-4"/>
</attvalues>
<viz:size value="58.52305"/>
<viz:position x="2193.8655" y="-1922.864"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="46" label="46">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.002721622502083928"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="8.839003175515467E-4"/>
</attvalues>
<viz:size value="78.06984"/>
<viz:position x="4399.422" y="-2059.1765"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="47" label="47">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.001232463844738311"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="6.036795128804605E-4"/>
</attvalues>
<viz:size value="64.98894"/>
<viz:position x="1202.8726" y="-723.7263"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="48" label="48">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0015512223208086053"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.604102332559622E-4"/>
</attvalues>
<viz:size value="67.63717"/>
<viz:position x="9370.8" y="9038.355"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="49" label="49">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.0022853067167847322"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="8.625163873872292E-4"/>
</attvalues>
<viz:size value="77.071625"/>
<viz:position x="-2339.0898" y="19572.625"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="50" label="50">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0015512223208086053"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.604102332559622E-4"/>
</attvalues>
<viz:size value="67.63717"/>
<viz:position x="9280.728" y="8895.847"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="51" label="51">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013161975803301096"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.303680453101047E-4"/>
</attvalues>
<viz:size value="61.566704"/>
<viz:position x="-4726.4053" y="5769.2812"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="52" label="52">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0017237358098757033"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.814942268084688E-4"/>
</attvalues>
<viz:size value="68.62138"/>
<viz:position x="9138.159" y="5837.962"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="53" label="53">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0013484814717938287"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.432652810686091E-4"/>
</attvalues>
<viz:size value="57.500683"/>
<viz:position x="-1978.1321" y="4071.5298"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="54" label="54">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0014238141421109597"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.266018469197015E-4"/>
</attvalues>
<viz:size value="61.390892"/>
<viz:position x="-3740.24" y="5907.3984"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="55" label="55">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.0018995434997248171"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="8.905459912798246E-4"/>
</attvalues>
<viz:size value="78.380066"/>
<viz:position x="-5007.027" y="8254.607"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="56" label="56">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.001444585579698554"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="7.967789757857759E-4"/>
</attvalues>
<viz:size value="74.00296"/>
<viz:position x="-4917.6914" y="7798.3896"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="57" label="57">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.0018995434997248171"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="8.905459912798246E-4"/>
</attvalues>
<viz:size value="78.380066"/>
<viz:position x="-4888.316" y="8062.944"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="58" label="58">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001074676667663544"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.958855731207386E-4"/>
</attvalues>
<viz:size value="59.957035"/>
<viz:position x="-4613.7803" y="6947.4043"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="59" label="59">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0011032508475147269"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.004611483603373E-4"/>
</attvalues>
<viz:size value="60.170624"/>
<viz:position x="-4261.662" y="6934.8735"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="60" label="60">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0015334054507392008"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="5.064085640916194E-4"/>
</attvalues>
<viz:size value="60.448257"/>
<viz:position x="-457.24283" y="927.84705"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="61" label="61">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="6.341202279908639E-4"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="3.6607274791027755E-4"/>
</attvalues>
<viz:size value="53.897278"/>
<viz:position x="-116.78565" y="2679.8484"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="62" label="62">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.0016887877493415326"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="8.509149080619792E-4"/>
</attvalues>
<viz:size value="76.53006"/>
<viz:position x="7076.0366" y="4925.562"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="63" label="63">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0010781043975336075"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="4.344345046622278E-4"/>
</attvalues>
<viz:size value="57.088455"/>
<viz:position x="337.6429" y="9356.949"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="64" label="64">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="21"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37682403433476397"/>
<attvalue for="harmonicclosnesscentrality" value="0.39362186788156256"/>
<attvalue for="betweenesscentrality" value="0.003364465005155648"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="0.001399232243387218"/>
</attvalues>
<viz:size value="102.125916"/>
<viz:position x="-1430.9254" y="3262.0303"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="65" label="65">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0010530573877120144"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="3.6317829294648204E-4"/>
</attvalues>
<viz:size value="53.76216"/>
<viz:position x="7860.2476" y="2732.5725"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="66" label="66">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0016531869314115124"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="5.087288444856628E-4"/>
</attvalues>
<viz:size value="60.55657"/>
<viz:position x="8188.139" y="-672.31903"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="67" label="67">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0014005014665967336"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="4.8596279185740993E-4"/>
</attvalues>
<viz:size value="59.493835"/>
<viz:position x="4596.8823" y="2049.1484"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="68" label="68">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.00223094320263635"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="8.343482673790737E-4"/>
</attvalues>
<viz:size value="75.756714"/>
<viz:position x="-478.6775" y="-3258.964"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="69" label="69">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0016957052414689226"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="5.714692333470293E-4"/>
</attvalues>
<viz:size value="63.485336"/>
<viz:position x="6127.2026" y="2340.0386"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="70" label="70">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0014059663943136109"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="5.382424060049651E-4"/>
</attvalues>
<viz:size value="61.93428"/>
<viz:position x="3381.4453" y="1965.2327"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="71" label="71">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0014767569473599974"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="5.93700123788007E-4"/>
</attvalues>
<viz:size value="64.52309"/>
<viz:position x="5069.8477" y="2012.5786"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="72" label="72">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0015081251047766534"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="5.758125113891016E-4"/>
</attvalues>
<viz:size value="63.68808"/>
<viz:position x="12812.886" y="-5584.4507"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="73" label="73">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.849805495367784E-4"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="3.6832224767866726E-4"/>
</attvalues>
<viz:size value="54.00229"/>
<viz:position x="6060.2534" y="-754.6576"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="74" label="74">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0010481167943042736"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="6.07284338192639E-4"/>
</attvalues>
<viz:size value="65.15721"/>
<viz:position x="6628.026" y="-1216.9733"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="75" label="75">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0011374246978730596"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="4.1339376109230573E-4"/>
</attvalues>
<viz:size value="56.10626"/>
<viz:position x="1206.4205" y="3400.4841"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="76" label="76">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001614616912335068"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="6.400296745404916E-4"/>
</attvalues>
<viz:size value="66.68578"/>
<viz:position x="1202.5596" y="3637.535"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="77" label="77">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001064571626975534"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.568305233936285E-4"/>
</attvalues>
<viz:size value="58.13392"/>
<viz:position x="-12295.348" y="-2210.5896"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="78" label="78">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="19"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37669469709970826"/>
<attvalue for="harmonicclosnesscentrality" value="0.3933181473044935"/>
<attvalue for="betweenesscentrality" value="0.0042890304311611425"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="0.001379792824453625"/>
</attvalues>
<viz:size value="101.21846"/>
<viz:position x="-8152.733" y="-5527.329"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="79" label="79">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.0036551675255700124"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0011293922933508867"/>
</attvalues>
<viz:size value="89.52959"/>
<viz:position x="-3341.9497" y="-8370.892"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="80" label="80">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="19"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.003651977417616066"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0011279181745790727"/>
</attvalues>
<viz:size value="89.46077"/>
<viz:position x="-3125.5713" y="-8234.889"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="81" label="81">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="22"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.0035317958359442304"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="0.0012399964395745078"/>
</attvalues>
<viz:size value="94.692665"/>
<viz:position x="-5807.3696" y="-3110.556"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="82" label="82">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.002936517324654728"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="0.0010927746086358495"/>
</attvalues>
<viz:size value="87.82025"/>
<viz:position x="-6782.157" y="-5446.3574"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="83" label="83">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="25"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.002778766793656318"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="0.0010135163188291336"/>
</attvalues>
<viz:size value="84.12041"/>
<viz:position x="-8563.533" y="-6291.264"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="84" label="84">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.002204052646673723"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="7.603758848362031E-4"/>
</attvalues>
<viz:size value="72.303635"/>
<viz:position x="-3653.2195" y="-5761.8867"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="85" label="85">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001033379615199722"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.833152359342298E-4"/>
</attvalues>
<viz:size value="59.370243"/>
<viz:position x="7552.3364" y="5963.9585"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="86" label="86">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0018462151013023682"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="6.382747748391647E-4"/>
</attvalues>
<viz:size value="66.60387"/>
<viz:position x="3679.2524" y="1199.2076"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="87" label="87">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0019046869809476224"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="9.440445893571384E-4"/>
</attvalues>
<viz:size value="80.877426"/>
<viz:position x="-14964.035" y="-1716.9816"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="88" label="88">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001595210918396378"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.170277628638994E-4"/>
</attvalues>
<viz:size value="60.943966"/>
<viz:position x="-10845.451" y="-5088.485"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="89" label="89">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="32"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.004478394276315343"/>
<attvalue for="modularity_class" value="12"/>
<attvalue for="pageranks" value="0.0012289057805420383"/>
</attvalues>
<viz:size value="94.17494"/>
<viz:position x="-23399.56" y="-6348.1133"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="90" label="90">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="9.482027914179448E-4"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="3.746507809172782E-4"/>
</attvalues>
<viz:size value="54.297707"/>
<viz:position x="-10917.445" y="-949.7261"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="91" label="91">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.00212257959784597"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="8.618085469272958E-4"/>
</attvalues>
<viz:size value="77.03858"/>
<viz:position x="-14558.851" y="-44.104504"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="92" label="92">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001203172398174893"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="3.742638208952379E-4"/>
</attvalues>
<viz:size value="54.279644"/>
<viz:position x="-10182.443" y="1790.6731"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="93" label="93">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.0026803295558851204"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="0.0010589466476686698"/>
</attvalues>
<viz:size value="86.241135"/>
<viz:position x="-17898.953" y="671.09344"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="94" label="94">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0015626265696378435"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.326521217342466E-4"/>
</attvalues>
<viz:size value="61.673325"/>
<viz:position x="-14695.698" y="-840.84174"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="95" label="95">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0019448594527017738"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="6.834872466992211E-4"/>
</attvalues>
<viz:size value="68.71442"/>
<viz:position x="-17727.2" y="1593.6257"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="96" label="96">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0017111682614769168"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="6.01000218254905E-4"/>
</attvalues>
<viz:size value="64.86386"/>
<viz:position x="-11129.623" y="-55.394627"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="97" label="97">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001366565565310526"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.032979463025227E-4"/>
</attvalues>
<viz:size value="60.303047"/>
<viz:position x="-7845.296" y="-20.118998"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="98" label="98">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0013963723547195143"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.93590054094722E-4"/>
</attvalues>
<viz:size value="64.51795"/>
<viz:position x="-4693.0757" y="6444.101"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="99" label="99">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.002606272760566037"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="7.98979504315452E-4"/>
</attvalues>
<viz:size value="74.10568"/>
<viz:position x="-7005.475" y="11014.668"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="100" label="100">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.246385867742809E-4"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="3.1739441061657766E-4"/>
</attvalues>
<viz:size value="51.62494"/>
<viz:position x="-3895.8694" y="5675.876"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="101" label="101">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0014471115488745702"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.019130668943656E-4"/>
</attvalues>
<viz:size value="60.238403"/>
<viz:position x="-5562.734" y="6745.838"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="102" label="102">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0017082808053879954"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.838323148662565E-4"/>
</attvalues>
<viz:size value="64.062454"/>
<viz:position x="-7066.9995" y="10632.901"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="103" label="103">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.004631828307629533"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="0.0012801645938597915"/>
</attvalues>
<viz:size value="96.56775"/>
<viz:position x="-10367.1045" y="17566.072"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="104" label="104">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.004631828307629533"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="0.0012801645938597915"/>
</attvalues>
<viz:size value="96.56775"/>
<viz:position x="-10515.048" y="17454.572"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="105" label="105">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0014605231266262334"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.401730816957527E-4"/>
</attvalues>
<viz:size value="62.024406"/>
<viz:position x="-6469.0356" y="8188.7515"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="106" label="106">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013478929704162602"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.291958203343151E-4"/>
</attvalues>
<viz:size value="61.51198"/>
<viz:position x="-3414.6833" y="6453.2344"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="107" label="107">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0012324638447383115"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="6.036795128804605E-4"/>
</attvalues>
<viz:size value="64.98894"/>
<viz:position x="1125.3495" y="-793.7"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="108" label="108">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.00151739354626317"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.04002668748888E-4"/>
</attvalues>
<viz:size value="60.335945"/>
<viz:position x="2283.6895" y="-2156.3792"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="109" label="109">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013614435511374878"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="5.897154921903772E-4"/>
</attvalues>
<viz:size value="64.33708"/>
<viz:position x="10937.914" y="6632.3525"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="110" label="110">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0017304030031574141"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="7.904809763313333E-4"/>
</attvalues>
<viz:size value="73.70896"/>
<viz:position x="10394.186" y="6225.819"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="111" label="111">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="21"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.003712723831431227"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="0.0010400413776312052"/>
</attvalues>
<viz:size value="85.35862"/>
<viz:position x="5481.864" y="-6656.2344"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="112" label="112">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0014727309523800412"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.990591143114981E-4"/>
</attvalues>
<viz:size value="64.77325"/>
<viz:position x="1324.0369" y="-1155.9873"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="113" label="113">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0030088422247042906"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="0.0010497784502970715"/>
</attvalues>
<viz:size value="85.813156"/>
<viz:position x="1527.4113" y="-4836.358"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="114" label="114">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.186966511226391E-4"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="3.769860635013606E-4"/>
</attvalues>
<viz:size value="54.40672"/>
<viz:position x="891.40967" y="-1483.9391"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="115" label="115">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0016765468890304902"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.700844046916415E-4"/>
</attvalues>
<viz:size value="63.420692"/>
<viz:position x="1433.7094" y="-2080.7366"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="116" label="116">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0015568128960661255"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.070037803327389E-4"/>
</attvalues>
<viz:size value="60.47604"/>
<viz:position x="3890.9011" y="-2474.8347"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="117" label="117">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.262785869402627E-4"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="3.2246101286719403E-4"/>
</attvalues>
<viz:size value="51.86145"/>
<viz:position x="2545.024" y="-1868.6948"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="118" label="118">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37682403433476397"/>
<attvalue for="harmonicclosnesscentrality" value="0.39362186788156256"/>
<attvalue for="betweenesscentrality" value="0.0036264510682116895"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="0.0014104169278178827"/>
</attvalues>
<viz:size value="102.64802"/>
<viz:position x="-685.48267" y="-4199.084"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="119" label="119">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.003023287289550321"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0011561333605205715"/>
</attvalues>
<viz:size value="90.77788"/>
<viz:position x="1038.923" y="-5105.53"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="120" label="120">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.001880505404718542"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="7.349967457080808E-4"/>
</attvalues>
<viz:size value="71.11891"/>
<viz:position x="2482.3184" y="-765.4817"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="121" label="121">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.0028884552614379336"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="8.6945204965548E-4"/>
</attvalues>
<viz:size value="77.395386"/>
<viz:position x="2948.3684" y="-6064.969"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="122" label="122">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013081904933738204"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="5.590644612984828E-4"/>
</attvalues>
<viz:size value="62.906273"/>
<viz:position x="13109.61" y="-5916.632"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="123" label="123">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.002341016268736033"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="7.988433714650304E-4"/>
</attvalues>
<viz:size value="74.09932"/>
<viz:position x="13476.939" y="-6620.5347"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="124" label="124">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.0022712609197009697"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="8.086429364375025E-4"/>
</attvalues>
<viz:size value="74.55678"/>
<viz:position x="11839.198" y="-5583.209"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="125" label="125">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0010823642656383884"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="3.9630655480460864E-4"/>
</attvalues>
<viz:size value="55.308617"/>
<viz:position x="12700.638" y="-5802.4434"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="126" label="126">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.199616648620006E-4"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.3028362663512927E-4"/>
</attvalues>
<viz:size value="52.226616"/>
<viz:position x="6213.0713" y="4944.011"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="127" label="127">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0011088075667708326"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.462043568887559E-4"/>
</attvalues>
<viz:size value="62.30595"/>
<viz:position x="8607.481" y="8604.45"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="128" label="128">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.001402598721164946"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.29106926047563E-4"/>
</attvalues>
<viz:size value="56.83976"/>
<viz:position x="-5310.7363" y="6604.906"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="129" label="129">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.001406406951209051"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.4125254096687806E-4"/>
</attvalues>
<viz:size value="57.406723"/>
<viz:position x="-5563.289" y="6604.9453"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="130" label="130">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0021717284371632795"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="9.778087084783278E-4"/>
</attvalues>
<viz:size value="82.45355"/>
<viz:position x="-989.05383" y="2140.6821"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="131" label="131">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.003133770710632089"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="0.001051337196436307"/>
</attvalues>
<viz:size value="85.88592"/>
<viz:position x="-594.0232" y="12491.159"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="132" label="132">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.002659233768937985"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="9.356415735855154E-4"/>
</attvalues>
<viz:size value="80.48517"/>
<viz:position x="-2466.3486" y="19909.541"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="133" label="133">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0010304525820235932"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="4.065193604751191E-4"/>
</attvalues>
<viz:size value="55.785355"/>
<viz:position x="1479.015" y="7547.026"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="134" label="134">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0010781043975336085"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="4.3443450466222784E-4"/>
</attvalues>
<viz:size value="57.088455"/>
<viz:position x="253.76004" y="9297.636"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="135" label="135">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.001425323845276761"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.95445172481871E-4"/>
</attvalues>
<viz:size value="59.936478"/>
<viz:position x="5498.8477" y="-1924.1583"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="136" label="136">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0015041129569595513"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="6.39294187947365E-4"/>
</attvalues>
<viz:size value="66.65145"/>
<viz:position x="6588.677" y="-1143.9843"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="137" label="137">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0010538310078249576"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="4.5257621237262973E-4"/>
</attvalues>
<viz:size value="57.935326"/>
<viz:position x="6099.197" y="-1019.13684"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="138" label="138">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0010538310078249576"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="4.525762123726298E-4"/>
</attvalues>
<viz:size value="57.935326"/>
<viz:position x="6168.87" y="-1002.2008"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="139" label="139">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0019592785255115597"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="7.564797710416056E-4"/>
</attvalues>
<viz:size value="72.121765"/>
<viz:position x="6753.3887" y="-1243.5247"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="140" label="140">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="1.4266927661763965E-4"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="2.8547806356996443E-4"/>
</attvalues>
<viz:size value="50.13506"/>
<viz:position x="5998.692" y="-962.7304"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="141" label="141">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001202396167969789"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="5.00995796071774E-4"/>
</attvalues>
<viz:size value="60.195583"/>
<viz:position x="-375.0165" y="11723.217"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="142" label="142">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013515075961430254"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="5.813023711447281E-4"/>
</attvalues>
<viz:size value="63.94435"/>
<viz:position x="-460.2594" y="12019.228"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="143" label="143">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001202396167969789"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="5.00995796071774E-4"/>
</attvalues>
<viz:size value="60.195583"/>
<viz:position x="-440.9549" y="11784.457"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="144" label="144">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001202396167969789"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="5.00995796071774E-4"/>
</attvalues>
<viz:size value="60.195583"/>
<viz:position x="-409.52197" y="11618.46"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="145" label="145">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.002214668603496073"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="7.793515140977219E-4"/>
</attvalues>
<viz:size value="73.18943"/>
<viz:position x="203.28687" y="10044.933"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="146" label="146">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0034172029183326767"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="0.0011334245426802944"/>
</attvalues>
<viz:size value="89.71782"/>
<viz:position x="-2340.9214" y="20169.13"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="147" label="147">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.002214668603496073"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="7.793515140977219E-4"/>
</attvalues>
<viz:size value="73.18943"/>
<viz:position x="114.51402" y="10054.579"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="148" label="148">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="23"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37669469709970826"/>
<attvalue for="harmonicclosnesscentrality" value="0.3933181473044935"/>
<attvalue for="betweenesscentrality" value="0.0033084465144924305"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="0.0013259961053416008"/>
</attvalues>
<viz:size value="98.70719"/>
<viz:position x="-2991.044" y="-4119.0674"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="149" label="149">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001360884473137539"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="4.6528100722809064E-4"/>
</attvalues>
<viz:size value="58.52839"/>
<viz:position x="3454.5547" y="1190.5333"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="150" label="150">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0013611616885634854"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="4.809313356496969E-4"/>
</attvalues>
<viz:size value="59.25896"/>
<viz:position x="3160.3347" y="1918.989"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="151" label="151">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0013208381858389499"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="4.1044106188117965E-4"/>
</attvalues>
<viz:size value="55.968422"/>
<viz:position x="-136.981" y="160.61337"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="152" label="152">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.84980549536777E-4"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="3.6832224767866726E-4"/>
</attvalues>
<viz:size value="54.00229"/>
<viz:position x="5933.9053" y="-718.5958"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="153" label="153">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001086690284258463"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="3.9192827665641413E-4"/>
</attvalues>
<viz:size value="55.104233"/>
<viz:position x="281.3119" y="-9076.23"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="154" label="154">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552634"/>
<attvalue for="betweenesscentrality" value="9.111617312072891E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.557573547843128E-4"/>
</attvalues>
<viz:size value="58.08382"/>
<viz:position x="220.30708" y="1018.8171"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="155" label="155">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0011260070779248671"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.658188034569526E-4"/>
</attvalues>
<viz:size value="53.88542"/>
<viz:position x="6194.4346" y="7021.622"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="156" label="156">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0011177902939354754"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.600908276440786E-4"/>
</attvalues>
<viz:size value="53.61804"/>
<viz:position x="5517.4224" y="3513.126"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="157" label="157">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0013542092690024688"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="4.4622416226582247E-4"/>
</attvalues>
<viz:size value="57.638805"/>
<viz:position x="2095.3794" y="360.4977"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="158" label="158">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0011780370302365767"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.6973945947410975E-4"/>
</attvalues>
<viz:size value="54.068443"/>
<viz:position x="2283.3835" y="3288.982"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="159" label="159">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.952807236933733E-4"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.3850960871262156E-4"/>
</attvalues>
<viz:size value="52.61061"/>
<viz:position x="6402.53" y="6947.4414"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="160" label="160">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0021396728775948033"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="7.141158760617739E-4"/>
</attvalues>
<viz:size value="70.14418"/>
<viz:position x="-279.90482" y="9560.348"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="161" label="161">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.001575189899048797"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="6.254757304195686E-4"/>
</attvalues>
<viz:size value="66.00639"/>
<viz:position x="-13314.805" y="288.23608"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="162" label="162">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0012077706219242956"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.787061422192034E-4"/>
</attvalues>
<viz:size value="63.82316"/>
<viz:position x="-5622.1978" y="8282.624"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="163" label="163">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.0034503200680487955"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="0.0010642043871941216"/>
</attvalues>
<viz:size value="86.486565"/>
<viz:position x="3662.1672" y="303.20505"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="164" label="164">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0019412304462081316"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.156101796607377E-4"/>
</attvalues>
<viz:size value="65.54587"/>
<viz:position x="3614.6238" y="-8517.629"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="165" label="165">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.0024434389440068944"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="8.354107746263785E-4"/>
</attvalues>
<viz:size value="75.80632"/>
<viz:position x="9074.098" y="-18453.82"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="166" label="166">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0019340655024032232"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="6.561923653202488E-4"/>
</attvalues>
<viz:size value="67.44027"/>
<viz:position x="8766.96" y="-18172.328"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="167" label="167">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0012800605897882718"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="5.554750610014627E-4"/>
</attvalues>
<viz:size value="62.738716"/>
<viz:position x="4997.071" y="2582.366"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="168" label="168">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.002745345949618633"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="9.650643298722964E-4"/>
</attvalues>
<viz:size value="81.85864"/>
<viz:position x="-14193.313" y="-865.69434"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="169" label="169">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001607280150601469"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="5.627761589212577E-4"/>
</attvalues>
<viz:size value="63.079536"/>
<viz:position x="86.22729" y="10552.384"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="170" label="170">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0015817377401724565"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.914974050103558E-4"/>
</attvalues>
<viz:size value="69.08833"/>
<viz:position x="9511.848" y="5911.7847"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="171" label="171">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.272131354527867E-4"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="3.470255991667876E-4"/>
</attvalues>
<viz:size value="53.008144"/>
<viz:position x="273.40823" y="8025.3384"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="172" label="172">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.001351526281190416"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="4.446195937926706E-4"/>
</attvalues>
<viz:size value="57.563904"/>
<viz:position x="175.4602" y="7236.0303"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="173" label="173">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37669469709970826"/>
<attvalue for="harmonicclosnesscentrality" value="0.3933181473044935"/>
<attvalue for="betweenesscentrality" value="0.0044111477222802615"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="0.0014065240790714615"/>
</attvalues>
<viz:size value="102.4663"/>
<viz:position x="-7462.662" y="-7700.9414"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="174" label="174">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.002004991608548049"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="7.509636825919474E-4"/>
</attvalues>
<viz:size value="71.864265"/>
<viz:position x="-17662.86" y="1410.9802"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="175" label="175">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0015005845226213719"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.271927396353013E-4"/>
</attvalues>
<viz:size value="61.418476"/>
<viz:position x="-16761.527" y="320.7234"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="176" label="176">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013151642112676613"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="5.404474038864631E-4"/>
</attvalues>
<viz:size value="62.037216"/>
<viz:position x="-3274.4568" y="-3538.4575"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="177" label="177">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.482567369780947E-4"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="3.298967743245876E-4"/>
</attvalues>
<viz:size value="52.208557"/>
<viz:position x="-2345.8782" y="-2100.482"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="178" label="178">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0023379884489811427"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="0.0010172812315036408"/>
</attvalues>
<viz:size value="84.29616"/>
<viz:position x="-3504.2908" y="-4851.199"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="179" label="179">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0019175283391220644"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="7.199238490560363E-4"/>
</attvalues>
<viz:size value="70.4153"/>
<viz:position x="-3447.4863" y="-3778.3608"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="180" label="180">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.002214759743640482"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="9.172698162052417E-4"/>
</attvalues>
<viz:size value="79.627556"/>
<viz:position x="8937.635" y="7906.9736"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="181" label="181">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.00131619758033011"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.303680453101047E-4"/>
</attvalues>
<viz:size value="61.566704"/>
<viz:position x="-4785.691" y="5662.0093"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="182" label="182">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0015088443313893908"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="7.185729640297384E-4"/>
</attvalues>
<viz:size value="70.35225"/>
<viz:position x="8810.421" y="8694.236"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="183" label="183">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.0014662389681651968"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="8.122942683147859E-4"/>
</attvalues>
<viz:size value="74.72722"/>
<viz:position x="9054.477" y="6686.9116"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="184" label="184">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0011589914120214541"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="4.258988054295887E-4"/>
</attvalues>
<viz:size value="56.690002"/>
<viz:position x="8398.489" y="9657.176"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="185" label="185">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001033143265888898"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.682240902925847E-4"/>
</attvalues>
<viz:size value="58.66578"/>
<viz:position x="8407.902" y="8296.977"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="186" label="186">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3765654486189741"/>
<attvalue for="harmonicclosnesscentrality" value="0.39301442672742437"/>
<attvalue for="betweenesscentrality" value="0.0021624937971958147"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="0.0011505386300194318"/>
</attvalues>
<viz:size value="90.516716"/>
<viz:position x="-14079.488" y="-2407.1057"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="187" label="187">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0012324638447383117"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="6.036795128804605E-4"/>
</attvalues>
<viz:size value="64.98894"/>
<viz:position x="1021.41785" y="-718.1679"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="188" label="188">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0013055229531330427"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.805950838052607E-4"/>
</attvalues>
<viz:size value="68.57941"/>
<viz:position x="1420.7489" y="-554.7479"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="189" label="189">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0017729038962616696"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="6.476771691760151E-4"/>
</attvalues>
<viz:size value="67.04277"/>
<viz:position x="-57.81131" y="-5462.072"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="190" label="190">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="22"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.0024035000940838673"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="8.906125242273841E-4"/>
</attvalues>
<viz:size value="78.38318"/>
<viz:position x="-2582.0579" y="-12706.404"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="191" label="191">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0016206145468603095"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.847471282724938E-4"/>
</attvalues>
<viz:size value="64.105156"/>
<viz:position x="-2740.7542" y="-13839.9"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="192" label="192">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010656847037168733"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.6307602771120083E-4"/>
</attvalues>
<viz:size value="58.42546"/>
<viz:position x="-1595.121" y="-14602.076"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="193" label="193">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="24"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.002384665078921955"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="8.575704883957125E-4"/>
</attvalues>
<viz:size value="76.840744"/>
<viz:position x="-2082.0085" y="-14889.524"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="194" label="194">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001315647976407432"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.539469122542524E-4"/>
</attvalues>
<viz:size value="62.66738"/>
<viz:position x="-905.7356" y="-14523.926"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="195" label="195">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0015744798170831537"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.846382744217055E-4"/>
</attvalues>
<viz:size value="64.100075"/>
<viz:position x="115.14881" y="-9549.061"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="196" label="196">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.0031337707106320898"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="0.001051337196436307"/>
</attvalues>
<viz:size value="85.88592"/>
<viz:position x="-736.6576" y="12389.806"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="197" label="197">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001304960411969351"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="5.366496374667417E-4"/>
</attvalues>
<viz:size value="61.859932"/>
<viz:position x="307.28482" y="9770.936"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="198" label="198">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001667845877603873"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.316734212756331E-4"/>
</attvalues>
<viz:size value="61.62764"/>
<viz:position x="-11008.904" y="-3.201856"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="199" label="199">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001366565565310523"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.032979463025227E-4"/>
</attvalues>
<viz:size value="60.303047"/>
<viz:position x="-7773.7485" y="-9.55612"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="200" label="200">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="19"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.002783257494539317"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="9.711917433661761E-4"/>
</attvalues>
<viz:size value="82.14467"/>
<viz:position x="-6237.7056" y="-4380.3237"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="201" label="201">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="3.959624085953999E-4"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.408083285290062E-4"/>
</attvalues>
<viz:size value="57.385986"/>
<viz:position x="-16950.46" y="639.93274"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="202" label="202">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.0013921057854238987"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="7.136646277197249E-4"/>
</attvalues>
<viz:size value="70.123116"/>
<viz:position x="-8964.53" y="-4681.858"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="203" label="203">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010639324553200597"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.6010571573100234E-4"/>
</attvalues>
<viz:size value="58.286804"/>
<viz:position x="-9878.269" y="-1691.5457"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="204" label="204">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="19"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.004415910023815413"/>
<attvalue for="modularity_class" value="12"/>
<attvalue for="pageranks" value="0.0012943371523965253"/>
</attvalues>
<viz:size value="97.22934"/>
<viz:position x="-23219.135" y="-6894.0664"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="205" label="205">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0010538310078249584"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="4.525762123726298E-4"/>
</attvalues>
<viz:size value="57.935326"/>
<viz:position x="6158.5454" y="-909.7181"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="206" label="206">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001546586985005657"/>
<attvalue for="modularity_class" value="11"/>
<attvalue for="pageranks" value="6.245697911044638E-4"/>
</attvalues>
<viz:size value="65.96411"/>
<viz:position x="8285.904" y="-3868.8975"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="207" label="207">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001546586985005657"/>
<attvalue for="modularity_class" value="11"/>
<attvalue for="pageranks" value="6.245697911044638E-4"/>
</attvalues>
<viz:size value="65.96411"/>
<viz:position x="8189.8496" y="-3895.0034"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="208" label="208">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.001878592448598729"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="7.158247478343743E-4"/>
</attvalues>
<viz:size value="70.22395"/>
<viz:position x="868.1337" y="8025.236"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="209" label="209">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0013184799312202173"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="4.954352436801624E-4"/>
</attvalues>
<viz:size value="59.936012"/>
<viz:position x="3042.921" y="-2021.8965"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="210" label="210">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="4.073182000129551E-4"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="3.4784587281244596E-4"/>
</attvalues>
<viz:size value="53.046436"/>
<viz:position x="2987.2937" y="-1971.177"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="211" label="211">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001264079198468209"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.407049879029308E-4"/>
</attvalues>
<viz:size value="62.049236"/>
<viz:position x="777.0275" y="-1054.1816"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="212" label="212">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0016959436637787916"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="5.698562801571496E-4"/>
</attvalues>
<viz:size value="63.41004"/>
<viz:position x="-2402.6785" y="-11516.7"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="213" label="213">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0016955534640976158"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.26409014343912E-4"/>
</attvalues>
<viz:size value="61.38189"/>
<viz:position x="1370.3528" y="-1518.3271"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="214" label="214">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0012872852651445398"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="4.2999816484816123E-4"/>
</attvalues>
<viz:size value="56.881363"/>
<viz:position x="3105.7961" y="-1921.8021"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="215" label="215">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001592743798041043"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.891389130335452E-4"/>
</attvalues>
<viz:size value="64.310165"/>
<viz:position x="-197.36897" y="-40.722733"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="216" label="216">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0014530698470446677"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.416093327086905E-4"/>
</attvalues>
<viz:size value="62.091454"/>
<viz:position x="2043.8682" y="-149.73242"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="217" label="217">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="3"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552623"/>
<attvalue for="betweenesscentrality" value="2.2747896001312334E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="2.988288915966659E-4"/>
</attvalues>
<viz:size value="50.758286"/>
<viz:position x="437.97226" y="1278.4568"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="218" label="218">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="3"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552623"/>
<attvalue for="betweenesscentrality" value="2.2747896001312334E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="2.988288915966659E-4"/>
</attvalues>
<viz:size value="50.758286"/>
<viz:position x="425.80035" y="1261.7914"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="219" label="219">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="3"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552623"/>
<attvalue for="betweenesscentrality" value="2.2747896001312334E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="2.988288915966659E-4"/>
</attvalues>
<viz:size value="50.758286"/>
<viz:position x="447.20276" y="1247.1108"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="220" label="220">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.002070084065650742"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="7.798273764449207E-4"/>
</attvalues>
<viz:size value="73.21165"/>
<viz:position x="1222.096" y="3754.613"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="221" label="221">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.00176553464672157"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="6.347863575241565E-4"/>
</attvalues>
<viz:size value="66.441025"/>
<viz:position x="1290.4822" y="3813.0488"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="222" label="222">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001578069941143743"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.91572235054754E-4"/>
</attvalues>
<viz:size value="64.42376"/>
<viz:position x="636.2475" y="-518.27515"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="223" label="223">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="3"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552623"/>
<attvalue for="betweenesscentrality" value="2.2747896001312334E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="2.988288915966659E-4"/>
</attvalues>
<viz:size value="50.758286"/>
<viz:position x="414.7213" y="1244.3346"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="224" label="224">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001108807566770832"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.462043568887559E-4"/>
</attvalues>
<viz:size value="62.30595"/>
<viz:position x="8612.57" y="8447.044"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="225" label="225">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0016139493320476786"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="5.516640299311501E-4"/>
</attvalues>
<viz:size value="62.560814"/>
<viz:position x="8449.519" y="9785.503"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="226" label="226">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013657433514391892"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.676801165720453E-4"/>
</attvalues>
<viz:size value="63.308456"/>
<viz:position x="7086.4727" y="7241.932"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="227" label="227">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.001967303353095265"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="6.909379497292978E-4"/>
</attvalues>
<viz:size value="69.06222"/>
<viz:position x="12777.1455" y="-5485.3975"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="228" label="228">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0023347620821426046"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="6.36254075292817E-4"/>
</attvalues>
<viz:size value="66.50954"/>
<viz:position x="8675.968" y="-10338.462"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="229" label="229">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0014767569473599965"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="5.93700123788007E-4"/>
</attvalues>
<viz:size value="64.52309"/>
<viz:position x="5109.655" y="2065.53"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="230" label="230">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.003189570987206761"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0011455895934063742"/>
</attvalues>
<viz:size value="90.28569"/>
<viz:position x="-327.48193" y="-7363.925"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="231" label="231">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="2.0002169794317156E-4"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="2.450007258488749E-4"/>
</attvalues>
<viz:size value="48.24555"/>
<viz:position x="12656.2705" y="-5678.994"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="232" label="232">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="4.2982289333032796E-4"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="3.438015725755311E-4"/>
</attvalues>
<viz:size value="52.857643"/>
<viz:position x="8803.297" y="-3392.4294"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="233" label="233">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0016237295021968357"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="5.813524827635348E-4"/>
</attvalues>
<viz:size value="63.946693"/>
<viz:position x="12989.266" y="-6585.7227"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="234" label="234">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001807371440814919"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="5.324675892031863E-4"/>
</attvalues>
<viz:size value="61.66471"/>
<viz:position x="10096.52" y="-6928.807"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="235" label="235">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552634"/>
<attvalue for="betweenesscentrality" value="9.111617312072891E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.557573547843128E-4"/>
</attvalues>
<viz:size value="58.08382"/>
<viz:position x="233.14345" y="873.0856"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="236" label="236">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0012020020326125225"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="4.214766071018541E-4"/>
</attvalues>
<viz:size value="56.48357"/>
<viz:position x="4368.6914" y="2378.1702"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="237" label="237">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001016949769269413"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="3.3685658047764383E-4"/>
</attvalues>
<viz:size value="52.533447"/>
<viz:position x="2173.7207" y="-720.6352"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="238" label="238">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0010506487838329297"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="3.442007206740942E-4"/>
</attvalues>
<viz:size value="52.876274"/>
<viz:position x="-3296.4941" y="4344.4487"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="239" label="239">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001204131808893072"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="3.9105754514116576E-4"/>
</attvalues>
<viz:size value="55.063587"/>
<viz:position x="6.5715117" y="31.896763"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="240" label="240">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0011254801898901829"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.8118224566340106E-4"/>
</attvalues>
<viz:size value="54.6026"/>
<viz:position x="7620.652" y="3819.9937"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="241" label="241">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001018865387474067"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="3.4370474423426337E-4"/>
</attvalues>
<viz:size value="52.853123"/>
<viz:position x="1224.6522" y="-133.22652"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="242" label="242">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0011167505885898337"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="3.555086633730667E-4"/>
</attvalues>
<viz:size value="53.404137"/>
<viz:position x="641.1111" y="-375.79944"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="243" label="243">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010556758440994846"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.696574604601706E-4"/>
</attvalues>
<viz:size value="58.73269"/>
<viz:position x="8434.682" y="6099.5874"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="244" label="244">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001290580366390088"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.8437327596727217E-4"/>
</attvalues>
<viz:size value="59.419632"/>
<viz:position x="8312.755" y="7663.592"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="245" label="245">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0016120013957881235"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.466086346784316E-4"/>
</attvalues>
<viz:size value="66.9929"/>
<viz:position x="9048.716" y="8482.14"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="246" label="246">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0014766120328597158"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="5.877105454824476E-4"/>
</attvalues>
<viz:size value="64.24349"/>
<viz:position x="5331.2207" y="1820.9486"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="247" label="247">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0012425149469733484"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="4.882183402687363E-4"/>
</attvalues>
<viz:size value="59.59912"/>
<viz:position x="4708.7188" y="2399.0366"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="248" label="248">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.002397808189778452"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="7.549838945417023E-4"/>
</attvalues>
<viz:size value="72.05193"/>
<viz:position x="3901.4944" y="-9054.152"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="249" label="249">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0017808029168820073"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="7.090122430947769E-4"/>
</attvalues>
<viz:size value="69.905945"/>
<viz:position x="5300.4536" y="1978.8126"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="250" label="250">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="4.852106235122473E-4"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.6401061867403637E-4"/>
</attvalues>
<viz:size value="58.46909"/>
<viz:position x="-4595.4375" y="6416.5977"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="251" label="251">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="2.2188209614038988E-4"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="3.091456064626114E-4"/>
</attvalues>
<viz:size value="51.239876"/>
<viz:position x="-4503.3047" y="6522.7515"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="252" label="252">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.0015211012393427361"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="7.453605796514304E-4"/>
</attvalues>
<viz:size value="71.60271"/>
<viz:position x="-4854.8096" y="6753.6406"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="253" label="253">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="6.591508980779901E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.1252836244509244E-4"/>
</attvalues>
<viz:size value="56.065857"/>
<viz:position x="-3881.0537" y="6320.9614"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="254" label="254">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="3.1209715949528986E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="2.4062884403653795E-4"/>
</attvalues>
<viz:size value="48.041466"/>
<viz:position x="-3926.9775" y="6402.3315"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="255" label="255">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="5.67926216001609E-4"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.1601128560888207E-4"/>
</attvalues>
<viz:size value="56.228447"/>
<viz:position x="-5247.4937" y="6733.806"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="256" label="256">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0010700049744598536"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="3.9007115520668043E-4"/>
</attvalues>
<viz:size value="55.01754"/>
<viz:position x="-4495.131" y="5354.284"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="257" label="257">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0012256532574359458"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.472261596675535E-4"/>
</attvalues>
<viz:size value="57.685577"/>
<viz:position x="-5506.1694" y="7923.1865"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="258" label="258">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0014099378454158506"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.78332753905871E-4"/>
</attvalues>
<viz:size value="59.137657"/>
<viz:position x="-11970.63" y="-2628.7437"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="259" label="259">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.0018391138171208203"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="8.742412045170603E-4"/>
</attvalues>
<viz:size value="77.61895"/>
<viz:position x="-14778.239" y="-1943.007"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="260" label="260">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001469867315728437"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.433921470822671E-4"/>
</attvalues>
<viz:size value="62.174675"/>
<viz:position x="-8655.241" y="-716.40845"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="261" label="261">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.0016918854330959408"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="7.987783780362395E-4"/>
</attvalues>
<viz:size value="74.09629"/>
<viz:position x="-14596.621" y="-2130.9338"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="262" label="262">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0015275886961376949"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.488175903687065E-4"/>
</attvalues>
<viz:size value="62.42794"/>
<viz:position x="7982.288" y="3956.96"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="263" label="263">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0013738656226540509"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.533556303590393E-4"/>
</attvalues>
<viz:size value="57.971706"/>
<viz:position x="7876.479" y="4148.9443"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="264" label="264">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001202544023107358"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.150185583075184E-4"/>
</attvalues>
<viz:size value="60.850174"/>
<viz:position x="8813.525" y="4647.0796"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="265" label="265">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0011558487350917571"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="5.190338663576192E-4"/>
</attvalues>
<viz:size value="61.037613"/>
<viz:position x="8093.0903" y="2806.6262"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="266" label="266">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0025805440356945135"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="7.876344001391574E-4"/>
</attvalues>
<viz:size value="73.57608"/>
<viz:position x="-3867.4636" y="-3893.692"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="267" label="267">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0020130485768975206"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="7.15622861403457E-4"/>
</attvalues>
<viz:size value="70.21453"/>
<viz:position x="2494.201" y="-3709.3508"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="268" label="268">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0013193322170688023"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="6.156786149867846E-4"/>
</attvalues>
<viz:size value="65.54906"/>
<viz:position x="941.1201" y="-1131.0034"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="269" label="269">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0014054528316565223"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.482071424864716E-4"/>
</attvalues>
<viz:size value="62.399445"/>
<viz:position x="3228.3552" y="-3029.1775"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="270" label="270">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010591436999039605"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.658454765995286E-4"/>
</attvalues>
<viz:size value="58.55474"/>
<viz:position x="7869.414" y="5641.1133"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="271" label="271">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0012089040797590106"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="3.8853759212674146E-4"/>
</attvalues>
<viz:size value="54.945953"/>
<viz:position x="-4019.6917" y="1582.6027"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="272" label="272">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0012525766658067566"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.8045203505930797E-4"/>
</attvalues>
<viz:size value="59.236588"/>
<viz:position x="-11062.33" y="-217.1459"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="273" label="273">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="9.415188368043562E-4"/>
<attvalue for="modularity_class" value="12"/>
<attvalue for="pageranks" value="4.5603156570460656E-4"/>
</attvalues>
<viz:size value="58.096622"/>
<viz:position x="-13246.312" y="-3215.0056"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="274" label="274">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="4.588514306795334E-4"/>
<attvalue for="modularity_class" value="12"/>
<attvalue for="pageranks" value="2.6610183327714805E-4"/>
</attvalues>
<viz:size value="49.230564"/>
<viz:position x="-11134.165" y="-1993.9062"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="275" label="275">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010305940359770882"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.5434617146033313E-4"/>
</attvalues>
<viz:size value="58.017944"/>
<viz:position x="-11494.369" y="-1375.0554"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="276" label="276">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0015164901714502371"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="6.355895998233066E-4"/>
</attvalues>
<viz:size value="66.478516"/>
<viz:position x="-15752.125" y="-10414.484"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="277" label="277">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010775974870660206"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.5826292321958604E-4"/>
</attvalues>
<viz:size value="58.200783"/>
<viz:position x="-11152.921" y="-580.1774"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="278" label="278">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="9.084917651232095E-4"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.616276199375822E-4"/>
</attvalues>
<viz:size value="58.35785"/>
<viz:position x="-10879.818" y="-5213.725"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="279" label="279">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010639324553200602"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.6010571573100234E-4"/>
</attvalues>
<viz:size value="58.286804"/>
<viz:position x="-9876.439" y="-1786.9999"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="280" label="280">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.002300809331056021"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="8.408832501750792E-4"/>
</attvalues>
<viz:size value="76.061775"/>
<viz:position x="-16943.902" y="107.03941"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="281" label="281">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="19"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0023483676067156825"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="9.659755551914854E-4"/>
</attvalues>
<viz:size value="81.90118"/>
<viz:position x="-18210.676" y="1075.8447"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="282" label="282">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010639324553200602"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.6010571573100234E-4"/>
</attvalues>
<viz:size value="58.286804"/>
<viz:position x="-9976.201" y="-1685.1439"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="283" label="283">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.001030452582023593"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="4.065193604751191E-4"/>
</attvalues>
<viz:size value="55.785355"/>
<viz:position x="1541.5638" y="7488.205"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="284" label="284">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="3"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3747652381765409"/>
<attvalue for="harmonicclosnesscentrality" value="0.38876233864845716"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="6.836360321764694E-5"/>
</attvalues>
<viz:size value="40.0"/>
<viz:position x="1011.7936" y="1284.3994"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="285" label="285">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0011529474175428235"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="4.3848328570884654E-4"/>
</attvalues>
<viz:size value="57.27745"/>
<viz:position x="329.99173" y="9468.639"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="286" label="286">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.272131354527863E-4"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="3.470255991667876E-4"/>
</attvalues>
<viz:size value="53.008144"/>
<viz:position x="352.8563" y="8481.348"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="287" label="287">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0018369213269794989"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="6.106543805663347E-4"/>
</attvalues>
<viz:size value="65.31453"/>
<viz:position x="-23.365858" y="8867.739"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="288" label="288">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="9.82464542845526E-4"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="3.819638146579632E-4"/>
</attvalues>
<viz:size value="54.639084"/>
<viz:position x="13657.214" y="-6970.203"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="289" label="289">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.0033218446137034197"/>
<attvalue for="modularity_class" value="10"/>
<attvalue for="pageranks" value="9.947890410594634E-4"/>
</attvalues>
<viz:size value="83.24621"/>
<viz:position x="-13782.586" y="-3273.8464"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="290" label="290">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013613209930676325"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.394238078205376E-4"/>
</attvalues>
<viz:size value="61.98943"/>
<viz:position x="-11866.156" y="-3342.5046"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="291" label="291">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0013798399542959052"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="4.599732866413553E-4"/>
</attvalues>
<viz:size value="58.280624"/>
<viz:position x="788.3932" y="8365.049"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="292" label="292">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0019652389542512802"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="6.694874539773642E-4"/>
</attvalues>
<viz:size value="68.0609"/>
<viz:position x="44.648045" y="10866.889"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="293" label="293">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001295092766632346"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.7922478694601274E-4"/>
</attvalues>
<viz:size value="59.1793"/>
<viz:position x="-931.2794" y="-9226.958"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="294" label="294">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0011086991044462913"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.428681477061195E-4"/>
</attvalues>
<viz:size value="57.482143"/>
<viz:position x="131.25195" y="-9791.227"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="295" label="295">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0018352097153471888"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="7.728969278522314E-4"/>
</attvalues>
<viz:size value="72.88812"/>
<viz:position x="8930.74" y="5844.5923"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="296" label="296">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0010890590297173092"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.34254303576466E-4"/>
</attvalues>
<viz:size value="61.748116"/>
<viz:position x="9492.951" y="7829.129"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="297" label="297">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0014677701061537039"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.590292930668185E-4"/>
</attvalues>
<viz:size value="62.904625"/>
<viz:position x="7400.4087" y="5291.5884"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="298" label="298">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0010855008803910956"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.406494755687256E-4"/>
</attvalues>
<viz:size value="62.046646"/>
<viz:position x="8791.625" y="7423.187"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="299" label="299">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0014547147913001478"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="5.950357532252758E-4"/>
</attvalues>
<viz:size value="64.585434"/>
<viz:position x="8146.031" y="8981.348"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="300" label="300">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010525398572108224"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.7006993844513317E-4"/>
</attvalues>
<viz:size value="58.751945"/>
<viz:position x="7330.61" y="7651.6343"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="301" label="301">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="31"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.002889803946177433"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="9.853234009538111E-4"/>
</attvalues>
<viz:size value="82.80435"/>
<viz:position x="10306.512" y="4166.523"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="302" label="302">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.199616648620028E-4"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.3028362663512927E-4"/>
</attvalues>
<viz:size value="52.226616"/>
<viz:position x="6012.17" y="4836.339"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="303" label="303">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="4.702825544314429E-4"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="3.7830509806603804E-4"/>
</attvalues>
<viz:size value="54.468292"/>
<viz:position x="759.81744" y="9175.374"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="304" label="304">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="4.702825544314429E-4"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="3.7830509806603804E-4"/>
</attvalues>
<viz:size value="54.468292"/>
<viz:position x="726.1414" y="9128.702"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="305" label="305">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.0027477684271656937"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="9.654349522059697E-4"/>
</attvalues>
<viz:size value="81.87594"/>
<viz:position x="7038.36" y="2903.842"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="306" label="306">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.002117437629993245"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="7.173890528074156E-4"/>
</attvalues>
<viz:size value="70.296974"/>
<viz:position x="3615.7642" y="-8408.028"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="307" label="307">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0012055281990502452"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.4888299963985955E-4"/>
</attvalues>
<viz:size value="57.76292"/>
<viz:position x="3357.521" y="-7554.638"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="308" label="308">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3765654486189741"/>
<attvalue for="harmonicclosnesscentrality" value="0.39301442672742426"/>
<attvalue for="betweenesscentrality" value="0.004561826147448433"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="0.0014073602625421073"/>
</attvalues>
<viz:size value="102.50534"/>
<viz:position x="9742.444" y="-19894.71"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="309" label="309">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0016206145468603056"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.847471282724937E-4"/>
</attvalues>
<viz:size value="64.105156"/>
<viz:position x="-2702.395" y="-13755.224"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="310" label="310">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="4.040024800603781E-4"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="4.042515818715612E-4"/>
</attvalues>
<viz:size value="55.679493"/>
<viz:position x="-3199.8862" y="-3465.9421"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="311" label="311">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001357339563712907"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.459273157959865E-4"/>
</attvalues>
<viz:size value="62.29302"/>
<viz:position x="2291.3572" y="-1431.6246"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="312" label="312">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0012324638447383117"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="6.036795128804605E-4"/>
</attvalues>
<viz:size value="64.98894"/>
<viz:position x="1114.1401" y="-626.977"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="313" label="313">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001132024152986263"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.4348847949837923E-4"/>
</attvalues>
<viz:size value="57.5111"/>
<viz:position x="-5651.1055" y="7468.586"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="314" label="314">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013240818511696768"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.162966160191184E-4"/>
</attvalues>
<viz:size value="60.909836"/>
<viz:position x="-5373.8687" y="7945.1567"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="315" label="315">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="9.830882975517684E-4"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="3.7733215768137195E-4"/>
</attvalues>
<viz:size value="54.422874"/>
<viz:position x="-11678.428" y="-1982.8386"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="316" label="316">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0023819778165657893"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="7.874633984504708E-4"/>
</attvalues>
<viz:size value="73.5681"/>
<viz:position x="6569.2456" y="2615.8833"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="317" label="317">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.0029854734676764178"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0011489458195225969"/>
</attvalues>
<viz:size value="90.44237"/>
<viz:position x="1154.4967" y="-6977.2773"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="318" label="318">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0010855008803910958"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.406494755687256E-4"/>
</attvalues>
<viz:size value="62.046646"/>
<viz:position x="8661.83" y="7311.2183"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="319" label="319">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="25"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.00167825306246158"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="5.675284015038552E-4"/>
</attvalues>
<viz:size value="63.301376"/>
<viz:position x="67.6029" y="-2803.3582"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="320" label="320">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0016011698401902606"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.507402248057325E-4"/>
</attvalues>
<viz:size value="62.517693"/>
<viz:position x="8780.999" y="4536.7144"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="321" label="321">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001569812836624929"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="5.771642854612632E-4"/>
</attvalues>
<viz:size value="63.751183"/>
<viz:position x="9313.905" y="11483.396"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="322" label="322">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001355518726350868"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="5.878703800472966E-4"/>
</attvalues>
<viz:size value="64.25095"/>
<viz:position x="11086.405" y="6818.352"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="323" label="323">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0011259089429348722"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="3.977110062760829E-4"/>
</attvalues>
<viz:size value="55.374176"/>
<viz:position x="8461.552" y="-17446.438"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="324" label="324">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0015183329558768183"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="6.088512723996551E-4"/>
</attvalues>
<viz:size value="65.230354"/>
<viz:position x="1656.2998" y="-241.25377"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="325" label="325">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001130203158014188"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.213336324222247E-4"/>
</attvalues>
<viz:size value="61.144966"/>
<viz:position x="1892.5985" y="-452.25558"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="326" label="326">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001340154994979057"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.357931137162683E-4"/>
</attvalues>
<viz:size value="61.819946"/>
<viz:position x="285.23105" y="-767.12396"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="327" label="327">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001340154994979057"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.357931137162683E-4"/>
</attvalues>
<viz:size value="61.819946"/>
<viz:position x="305.35934" y="-891.0921"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="328" label="328">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.0025951299335564984"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="0.0010798604207099662"/>
</attvalues>
<viz:size value="87.21741"/>
<viz:position x="-2660.8245" y="-4570.7983"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="329" label="329">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0010806765204181541"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.305870855037856E-4"/>
</attvalues>
<viz:size value="61.576927"/>
<viz:position x="7066.1006" y="7411.9263"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="330" label="330">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010470693926904922"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.683209792907197E-4"/>
</attvalues>
<viz:size value="58.6703"/>
<viz:position x="-1349.3593" y="-14077.081"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="331" label="331">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0011768068915923879"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.57426076130552E-4"/>
</attvalues>
<viz:size value="62.82979"/>
<viz:position x="-1050.1226" y="-13838.082"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="332" label="332">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="34"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.002384665078921955"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="8.575704883957125E-4"/>
</attvalues>
<viz:size value="76.840744"/>
<viz:position x="-2023.5039" y="-15047.903"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="333" label="333">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0013029986393091387"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.177734103683444E-4"/>
</attvalues>
<viz:size value="60.978775"/>
<viz:position x="-1855.0641" y="-13142.126"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="334" label="334">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0013029986393091387"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.177734103683444E-4"/>
</attvalues>
<viz:size value="60.978775"/>
<viz:position x="-1871.0739" y="-13313.69"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="335" label="335">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.002397808189778458"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="7.549838945417023E-4"/>
</attvalues>
<viz:size value="72.05193"/>
<viz:position x="3829.155" y="-9072.834"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="336" label="336">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0019208457360239282"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="6.738317521248244E-4"/>
</attvalues>
<viz:size value="68.26369"/>
<viz:position x="-1454.5594" y="-5998.911"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="337" label="337">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="28"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0026616283260182073"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="0.001026612353723505"/>
</attvalues>
<viz:size value="84.73174"/>
<viz:position x="-14383.635" y="-1824.4827"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="338" label="338">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001432330803675976"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="6.095865986933067E-4"/>
</attvalues>
<viz:size value="65.26468"/>
<viz:position x="9454.231" y="4362.3105"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="339" label="339">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001599640691668404"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="6.332085699050108E-4"/>
</attvalues>
<viz:size value="66.36737"/>
<viz:position x="4058.641" y="-1172.5708"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="340" label="340">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="1.7712220133421865E-4"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="2.3747054130607452E-4"/>
</attvalues>
<viz:size value="47.894035"/>
<viz:position x="13615.543" y="-7241.175"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="341" label="341">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.0022888838863477324"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="8.297979129415735E-4"/>
</attvalues>
<viz:size value="75.544304"/>
<viz:position x="15503.969" y="-7637.5005"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="342" label="342">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.005028275649773475"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="0.0013731649391139538"/>
</attvalues>
<viz:size value="100.90907"/>
<viz:position x="23677.334" y="-4248.5054"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="343" label="343">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="34"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.001391082704236136"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="4.3854517776760417E-4"/>
</attvalues>
<viz:size value="57.280342"/>
<viz:position x="13344.725" y="-6783.982"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="344" label="344">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0013409846245376368"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="4.904989990651082E-4"/>
</attvalues>
<viz:size value="59.705585"/>
<viz:position x="9166.337" y="-3549.7437"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="345" label="345">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0014331356916440217"/>
<attvalue for="modularity_class" value="17"/>
<attvalue for="pageranks" value="5.072190821779066E-4"/>
</attvalues>
<viz:size value="60.48609"/>
<viz:position x="3286.7385" y="12871.312"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="346" label="346">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.789163619620463E-4"/>
<attvalue for="modularity_class" value="17"/>
<attvalue for="pageranks" value="3.752650211672233E-4"/>
</attvalues>
<viz:size value="54.326378"/>
<viz:position x="3040.0122" y="11793.872"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="347" label="347">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0015465869850056627"/>
<attvalue for="modularity_class" value="11"/>
<attvalue for="pageranks" value="6.245697911044638E-4"/>
</attvalues>
<viz:size value="65.96411"/>
<viz:position x="8237.524" y="-3958.9087"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="348" label="348">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0015465869850056627"/>
<attvalue for="modularity_class" value="11"/>
<attvalue for="pageranks" value="6.245697911044638E-4"/>
</attvalues>
<viz:size value="65.96411"/>
<viz:position x="8222.976" y="-3844.0278"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="349" label="349">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0015076990141225186"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.63955551900543E-4"/>
</attvalues>
<viz:size value="63.13459"/>
<viz:position x="-10069.456" y="-1392.0696"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="350" label="350">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0015087322737334463"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.854649876415224E-4"/>
</attvalues>
<viz:size value="64.138664"/>
<viz:position x="7013.3677" y="5787.3296"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="351" label="351">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0011573441585868652"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.073097425637008E-4"/>
</attvalues>
<viz:size value="65.158394"/>
<viz:position x="9761.461" y="7894.5405"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="352" label="352">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010775974870660225"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.5826292321958604E-4"/>
</attvalues>
<viz:size value="58.200783"/>
<viz:position x="-11264.017" y="-815.7952"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="353" label="353">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0012129724799177855"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="4.281466339716092E-4"/>
</attvalues>
<viz:size value="56.79493"/>
<viz:position x="1106.0066" y="3313.874"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="354" label="354">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0010529761145502184"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="3.5600438841832906E-4"/>
</attvalues>
<viz:size value="53.42728"/>
<viz:position x="9142.639" y="5226.059"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="355" label="355">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0010803822736724549"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.624034263960235E-4"/>
</attvalues>
<viz:size value="53.72599"/>
<viz:position x="5630.6333" y="4366.331"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="356" label="356">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0013652930235870248"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="4.837865055351299E-4"/>
</attvalues>
<viz:size value="59.392242"/>
<viz:position x="-57.404854" y="2600.4421"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="357" label="357">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013573395637129068"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.459273157959865E-4"/>
</attvalues>
<viz:size value="62.29302"/>
<viz:position x="2211.0068" y="-1352.1827"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="358" label="358">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0011862975769408048"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.343694080214025E-4"/>
</attvalues>
<viz:size value="61.753487"/>
<viz:position x="1372.101" y="-949.0959"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="359" label="359">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001171205811215105"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="4.526007633747595E-4"/>
</attvalues>
<viz:size value="57.93647"/>
<viz:position x="3604.4895" y="-2151.536"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="360" label="360">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0018819887786551551"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="9.767405861555514E-4"/>
</attvalues>
<viz:size value="82.403694"/>
<viz:position x="-3332.5325" y="3212.5337"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="361" label="361">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0018874618812808222"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.991618893106844E-4"/>
</attvalues>
<viz:size value="64.778046"/>
<viz:position x="4338.112" y="-3835.842"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="362" label="362">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0016677588340557575"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.839819822984998E-4"/>
</attvalues>
<viz:size value="64.06944"/>
<viz:position x="3923.8901" y="-2602.5823"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="363" label="363">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013670330622648106"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.464563183353651E-4"/>
</attvalues>
<viz:size value="62.317715"/>
<viz:position x="3711.4524" y="-3042.164"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="364" label="364">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0015546839896917876"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="6.632548689175455E-4"/>
</attvalues>
<viz:size value="67.76996"/>
<viz:position x="-4661.2104" y="7395.1875"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="365" label="365">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001258273550046424"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.125938627884633E-4"/>
</attvalues>
<viz:size value="60.73699"/>
<viz:position x="-5420.6665" y="7801.7974"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="366" label="366">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0018887432385072585"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="7.085550671935254E-4"/>
</attvalues>
<viz:size value="69.8846"/>
<viz:position x="-5276.573" y="5061.5786"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="367" label="367">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0013714065722258391"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="6.060374941973124E-4"/>
</attvalues>
<viz:size value="65.09901"/>
<viz:position x="-5919.0957" y="7694.63"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="368" label="368">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="19"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.0030118198715471303"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="0.0011691335108509945"/>
</attvalues>
<viz:size value="91.384735"/>
<viz:position x="-4330.9717" y="3552.306"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="369" label="369">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0013049604119693566"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="5.366496374667417E-4"/>
</attvalues>
<viz:size value="61.859932"/>
<viz:position x="213.6226" y="9717.397"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="370" label="370">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0018364022056649818"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="6.546456910763276E-4"/>
</attvalues>
<viz:size value="67.36807"/>
<viz:position x="866.6725" y="9255.622"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="371" label="371">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0018364022056649818"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="6.546456910763276E-4"/>
</attvalues>
<viz:size value="67.36807"/>
<viz:position x="791.5168" y="9302.929"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="372" label="372">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0011245996814678927"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.798246134546625E-4"/>
</attvalues>
<viz:size value="54.539227"/>
<viz:position x="7404.765" y="4507.054"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="373" label="373">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.002882568673661705"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="9.045637523018808E-4"/>
</attvalues>
<viz:size value="79.034424"/>
<viz:position x="-16428.361" y="-11437.978"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="374" label="374">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0021396728775948167"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="7.141158760617739E-4"/>
</attvalues>
<viz:size value="70.14418"/>
<viz:position x="-328.1191" y="9496.617"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="375" label="375">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0016072801506014673"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="5.627761589212577E-4"/>
</attvalues>
<viz:size value="63.079536"/>
<viz:position x="189.29083" y="10556.863"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="376" label="376">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0026906932398329552"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0010118270365194813"/>
</attvalues>
<viz:size value="84.04155"/>
<viz:position x="-6739.4844" y="-795.5456"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="377" label="377">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="9.482027914179395E-4"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="3.746507809172782E-4"/>
</attvalues>
<viz:size value="54.297707"/>
<viz:position x="-10802.255" y="-833.7815"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="378" label="378">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010775974870660234"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.58262923219586E-4"/>
</attvalues>
<viz:size value="58.200783"/>
<viz:position x="-11230.193" y="-580.5938"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="379" label="379">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010775974870660234"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.58262923219586E-4"/>
</attvalues>
<viz:size value="58.200783"/>
<viz:position x="-11298.623" y="-616.7742"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="380" label="380">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="22"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.002828404527801034"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="0.0010742792862558394"/>
</attvalues>
<viz:size value="86.95687"/>
<viz:position x="-7999.0996" y="-4636.8965"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="381" label="381">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="36"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37747205503009457"/>
<attvalue for="harmonicclosnesscentrality" value="0.39514047076690795"/>
<attvalue for="betweenesscentrality" value="0.008381469104794845"/>
<attvalue for="modularity_class" value="12"/>
<attvalue for="pageranks" value="0.0022565197559967108"/>
</attvalues>
<viz:size value="142.14471"/>
<viz:position x="-23598.924" y="-6758.9653"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="382" label="382">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010775974870660234"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.58262923219586E-4"/>
</attvalues>
<viz:size value="58.200783"/>
<viz:position x="-11122.842" y="-646.5045"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="383" label="383">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0013639184716252178"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.909807196777813E-4"/>
</attvalues>
<viz:size value="59.728073"/>
<viz:position x="-10931.639" y="-5324.3315"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="384" label="384">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0028723455763001752"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="9.997988946048447E-4"/>
</attvalues>
<viz:size value="83.48007"/>
<viz:position x="-18559.986" y="1808.6481"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="385" label="385">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0038235710791586406"/>
<attvalue for="modularity_class" value="12"/>
<attvalue for="pageranks" value="0.0011958194770206301"/>
</attvalues>
<viz:size value="92.630455"/>
<viz:position x="-22120.576" y="-6188.159"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="386" label="386">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.0030431102872588173"/>
<attvalue for="modularity_class" value="10"/>
<attvalue for="pageranks" value="0.0010334598749352718"/>
</attvalues>
<viz:size value="85.05139"/>
<viz:position x="-13585.112" y="-3235.8691"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="387" label="387">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010774935854482132"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="4.3802728051679557E-4"/>
</attvalues>
<viz:size value="57.256165"/>
<viz:position x="-8290.332" y="-3168.1672"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="388" label="388">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0014058492189198673"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.6788516055088256E-4"/>
</attvalues>
<viz:size value="58.649956"/>
<viz:position x="-13129.067" y="-1508.6907"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="389" label="389">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.165532889846701E-4"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="3.2235863506265385E-4"/>
</attvalues>
<viz:size value="51.85667"/>
<viz:position x="-7838.44" y="-904.85376"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="390" label="390">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="19"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37669469709970826"/>
<attvalue for="harmonicclosnesscentrality" value="0.3933181473044935"/>
<attvalue for="betweenesscentrality" value="0.0037225419632738468"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0013382300365283072"/>
</attvalues>
<viz:size value="99.27829"/>
<viz:position x="-7062.8066" y="-6424.1826"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="391" label="391">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="50"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37747205503009457"/>
<attvalue for="harmonicclosnesscentrality" value="0.39514047076690795"/>
<attvalue for="betweenesscentrality" value="0.008290040671114532"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="0.002222800351464126"/>
</attvalues>
<viz:size value="140.57066"/>
<viz:position x="-19163.2" y="-14086.118"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="392" label="392">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0014190034487871841"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="4.8008414991246274E-4"/>
</attvalues>
<viz:size value="59.219414"/>
<viz:position x="-9797.844" y="-3353.7104"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="393" label="393">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0014783618685302485"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="5.451470283770889E-4"/>
</attvalues>
<viz:size value="62.25659"/>
<viz:position x="-10033.354" y="-3239.0837"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="394" label="394">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.001141414702417492"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.072204371447268E-4"/>
</attvalues>
<viz:size value="55.81808"/>
<viz:position x="-7651.2837" y="-18.504007"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="395" label="395">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0017597646448559716"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="5.864929465363925E-4"/>
</attvalues>
<viz:size value="64.18665"/>
<viz:position x="-10827.926" y="-3934.583"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="396" label="396">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013858307917218094"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="5.418263977172088E-4"/>
</attvalues>
<viz:size value="62.101585"/>
<viz:position x="-11007.06" y="-3686.0344"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="397" label="397">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.0013668317119863603"/>
<attvalue for="modularity_class" value="12"/>
<attvalue for="pageranks" value="4.293137281529736E-4"/>
</attvalues>
<viz:size value="56.84941"/>
<viz:position x="-11089.54" y="-2303.116"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="398" label="398">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010639324553200615"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.6010571573100234E-4"/>
</attvalues>
<viz:size value="58.286804"/>
<viz:position x="-10016.054" y="-1883.3214"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="399" label="399">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.001561139571540025"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="6.457124696314513E-4"/>
</attvalues>
<viz:size value="66.951065"/>
<viz:position x="-17886.984" y="1077.6267"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="400" label="400">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.002768887704792038"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="0.0011217213748499542"/>
</attvalues>
<viz:size value="89.1715"/>
<viz:position x="-2283.1167" y="6587.779"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="401" label="401">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.0025873949178574374"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="9.781119374959084E-4"/>
</attvalues>
<viz:size value="82.46771"/>
<viz:position x="-3420.6965" y="7440.193"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="402" label="402">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.001765725038413377"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="7.112310430429593E-4"/>
</attvalues>
<viz:size value="70.00952"/>
<viz:position x="-3377.2173" y="-4118.0"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="403" label="403">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794085"/>
<attvalue for="betweenesscentrality" value="0.0025805440356945044"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="7.876344001391574E-4"/>
</attvalues>
<viz:size value="73.57608"/>
<viz:position x="-3878.619" y="-3785.4365"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="404" label="404">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0012806615852121127"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.009776206473752E-4"/>
</attvalues>
<viz:size value="60.194733"/>
<viz:position x="-901.5312" y="-13159.515"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="405" label="405">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0012357639630050116"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.516799904948389E-4"/>
</attvalues>
<viz:size value="62.561558"/>
<viz:position x="-2456.435" y="-13748.046"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="406" label="406">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010633958426183713"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.711476178085235E-4"/>
</attvalues>
<viz:size value="58.80225"/>
<viz:position x="-1463.0121" y="-13759.088"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="407" label="407">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0019065533325925534"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="5.67998373437568E-4"/>
</attvalues>
<viz:size value="63.32331"/>
<viz:position x="8283.231" y="-17368.508"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="408" label="408">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.002162595825663685"/>
<attvalue for="modularity_class" value="17"/>
<attvalue for="pageranks" value="6.892713697844642E-4"/>
</attvalues>
<viz:size value="68.98442"/>
<viz:position x="2793.1626" y="13211.533"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="409" label="409">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0012007952571658539"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.788446894700505E-4"/>
</attvalues>
<viz:size value="63.829628"/>
<viz:position x="7042.8394" y="5159.624"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="410" label="410">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001144071332454721"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.896313347330919E-4"/>
</attvalues>
<viz:size value="59.665085"/>
<viz:position x="6769.484" y="5174.8315"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="411" label="411">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0012999135260665375"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.223610671877545E-4"/>
</attvalues>
<viz:size value="61.19293"/>
<viz:position x="-1437.6185" y="2186.6208"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="412" label="412">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0010379870728522396"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="4.4447074678773444E-4"/>
</attvalues>
<viz:size value="57.556953"/>
<viz:position x="1153.671" y="-977.0921"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="413" label="413">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.00134015499497906"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.357931137162683E-4"/>
</attvalues>
<viz:size value="61.819946"/>
<viz:position x="208.31798" y="-884.66785"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="414" label="414">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="3"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552623"/>
<attvalue for="betweenesscentrality" value="1.8140175213826373E-4"/>
<attvalue for="modularity_class" value="11"/>
<attvalue for="pageranks" value="1.9898351133743403E-4"/>
</attvalues>
<viz:size value="46.09743"/>
<viz:position x="6963.426" y="-3070.9092"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="415" label="415">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="23"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37669469709970826"/>
<attvalue for="harmonicclosnesscentrality" value="0.3933181473044935"/>
<attvalue for="betweenesscentrality" value="0.0025245543457678714"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="0.0012681505320321076"/>
</attvalues>
<viz:size value="96.00692"/>
<viz:position x="-528.2685" y="-1820.934"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="416" label="416">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0014225417837249745"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="6.620004555330748E-4"/>
</attvalues>
<viz:size value="67.711395"/>
<viz:position x="11005.343" y="7076.3477"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="417" label="417">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001005920251631932"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="3.4935945358940317E-4"/>
</attvalues>
<viz:size value="53.11709"/>
<viz:position x="11206.616" y="6522.5684"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="418" label="418">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001253912861414892"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="4.957776885273173E-4"/>
</attvalues>
<viz:size value="59.951996"/>
<viz:position x="11201.618" y="6965.299"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="419" label="419">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.0015511644045354345"/>
<attvalue for="modularity_class" value="17"/>
<attvalue for="pageranks" value="5.551407466892075E-4"/>
</attvalues>
<viz:size value="62.723106"/>
<viz:position x="3275.5522" y="12606.792"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="420" label="420">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.003418290558287055"/>
<attvalue for="modularity_class" value="17"/>
<attvalue for="pageranks" value="0.0011033804295431892"/>
</attvalues>
<viz:size value="88.31534"/>
<viz:position x="2538.5662" y="13136.639"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="421" label="421">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.0029937276982619555"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0011962340240178801"/>
</attvalues>
<viz:size value="92.64981"/>
<viz:position x="59.437756" y="-7751.2505"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="422" label="422">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552634"/>
<attvalue for="betweenesscentrality" value="9.111617312072891E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.557573547843128E-4"/>
</attvalues>
<viz:size value="58.08382"/>
<viz:position x="647.7775" y="1576.3918"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="423" label="423">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37695346041559336"/>
<attvalue for="harmonicclosnesscentrality" value="0.3939255884586316"/>
<attvalue for="betweenesscentrality" value="0.0028850633868054247"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="0.0014482895112631915"/>
</attvalues>
<viz:size value="104.41594"/>
<viz:position x="4110.6533" y="5825.911"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="424" label="424">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.001259124610384451"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="5.407064195879674E-4"/>
</attvalues>
<viz:size value="62.049305"/>
<viz:position x="9801.025" y="12723.182"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="425" label="425">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.0034814021712882056"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="0.0011779554623229833"/>
</attvalues>
<viz:size value="91.79655"/>
<viz:position x="-2999.0562" y="-12475.797"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="426" label="426">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.002125728961626586"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="8.099607899083655E-4"/>
</attvalues>
<viz:size value="74.61829"/>
<viz:position x="475.59546" y="-12719.018"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="427" label="427">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.002716440933523095"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="0.001041627221387128"/>
</attvalues>
<viz:size value="85.43265"/>
<viz:position x="-3144.8179" y="-11517.297"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="428" label="428">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.001157344158586865"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.073097425637008E-4"/>
</attvalues>
<viz:size value="65.158394"/>
<viz:position x="9780.174" y="8037.1807"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="429" label="429">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.0032262893954001872"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="9.868288759905708E-4"/>
</attvalues>
<viz:size value="82.87462"/>
<viz:position x="-3333.374" y="-950.1773"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="430" label="430">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.002358314094256443"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="9.341153626920813E-4"/>
</attvalues>
<viz:size value="80.41392"/>
<viz:position x="-987.46234" y="4641.0815"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="431" label="431">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.0020696455559404744"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="8.362073874540391E-4"/>
</attvalues>
<viz:size value="75.843506"/>
<viz:position x="1980.941" y="87.60833"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="432" label="432">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="19"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37682403433476397"/>
<attvalue for="harmonicclosnesscentrality" value="0.39362186788156256"/>
<attvalue for="betweenesscentrality" value="0.004461502850287275"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="0.0014279768422463023"/>
</attvalues>
<viz:size value="103.46773"/>
<viz:position x="-7864.275" y="4550.0586"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="433" label="433">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="0.001053831007824958"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="4.525762123726298E-4"/>
</attvalues>
<viz:size value="57.935326"/>
<viz:position x="6097.5874" y="-957.56433"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="434" label="434">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0018176516749709836"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.777752027039818E-4"/>
</attvalues>
<viz:size value="63.7797"/>
<viz:position x="9876.988" y="-7937.7217"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="435" label="435">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.002934146325499929"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="9.874768688847148E-4"/>
</attvalues>
<viz:size value="82.90488"/>
<viz:position x="-9276.063" y="721.58026"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="436" label="436">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37669469709970826"/>
<attvalue for="harmonicclosnesscentrality" value="0.3933181473044935"/>
<attvalue for="betweenesscentrality" value="0.004013472590551344"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="0.0014084217330316757"/>
</attvalues>
<viz:size value="102.554886"/>
<viz:position x="14163.272" y="-8009.2964"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="437" label="437">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37669469709970826"/>
<attvalue for="harmonicclosnesscentrality" value="0.3933181473044935"/>
<attvalue for="betweenesscentrality" value="0.00408600432645015"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="0.0014228579267174617"/>
</attvalues>
<viz:size value="103.228775"/>
<viz:position x="1877.2849" y="4764.576"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="438" label="438">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0011128291412678241"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="4.902859893923738E-4"/>
</attvalues>
<viz:size value="59.69564"/>
<viz:position x="8318.257" y="-17431.967"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="439" label="439">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37669469709970826"/>
<attvalue for="harmonicclosnesscentrality" value="0.3933181473044935"/>
<attvalue for="betweenesscentrality" value="0.0051820984791861174"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="0.0014732183529783689"/>
</attvalues>
<viz:size value="105.579636"/>
<viz:position x="12332.576" y="-8783.253"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="440" label="440">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0015985006453305716"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.033050677097052E-4"/>
</attvalues>
<viz:size value="64.97145"/>
<viz:position x="6847.9976" y="5407.528"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="441" label="441">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37682403433476397"/>
<attvalue for="harmonicclosnesscentrality" value="0.39362186788156256"/>
<attvalue for="betweenesscentrality" value="0.0031286425671453234"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="0.0013914208975796029"/>
</attvalues>
<viz:size value="101.76128"/>
<viz:position x="3674.0544" y="4399.5996"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="442" label="442">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.0030589303417490124"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0011511211647988997"/>
</attvalues>
<viz:size value="90.543915"/>
<viz:position x="682.8977" y="-6614.61"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="443" label="443">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="47"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650099"/>
<attvalue for="betweenesscentrality" value="0.0033853415420502246"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="9.506585674948618E-4"/>
</attvalues>
<viz:size value="81.18617"/>
<viz:position x="-6226.973" y="-8473.731"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="444" label="444">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="38"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37734227264913184"/>
<attvalue for="harmonicclosnesscentrality" value="0.3948367501898389"/>
<attvalue for="betweenesscentrality" value="0.00535839693345867"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="0.001791548146384132"/>
</attvalues>
<viz:size value="120.4395"/>
<viz:position x="-9624.375" y="-6568.948"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="445" label="445">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.003411225803198274"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="0.0010396116349659455"/>
</attvalues>
<viz:size value="85.33856"/>
<viz:position x="-5136.481" y="1375.0496"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="446" label="446">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.0033768324576611028"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="0.001036895548672194"/>
</attvalues>
<viz:size value="85.21177"/>
<viz:position x="-4973.4414" y="1537.5165"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="447" label="447">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0011351094195925276"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="4.071036774259851E-4"/>
</attvalues>
<viz:size value="55.81263"/>
<viz:position x="4638.2534" y="1809.583"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="448" label="448">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0011223284453199087"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="3.5396869286220643E-4"/>
</attvalues>
<viz:size value="53.332253"/>
<viz:position x="-3001.5063" y="-4842.3496"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="449" label="449">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.0010130688169312054"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.40877295799097E-4"/>
</attvalues>
<viz:size value="52.721138"/>
<viz:position x="5496.134" y="6374.1978"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="450" label="450">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.002464962859390894"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.001093158907950915"/>
</attvalues>
<viz:size value="87.83818"/>
<viz:position x="-1349.2274" y="-4168.936"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="451" label="451">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001082051991799564"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.6911955350399877E-4"/>
</attvalues>
<viz:size value="54.039505"/>
<viz:position x="3252.0498" y="-7187.433"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="452" label="452">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="21"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.0038887378787056826"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="0.001198706080304513"/>
</attvalues>
<viz:size value="92.765205"/>
<viz:position x="2430.8247" y="-6661.4297"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="453" label="453">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621717"/>
<attvalue for="betweenesscentrality" value="0.005028275649773481"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="0.0013731649391139538"/>
</attvalues>
<viz:size value="100.90907"/>
<viz:position x="23631.121" y="-4412.0244"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="454" label="454">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="19"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37695346041559336"/>
<attvalue for="harmonicclosnesscentrality" value="0.3939255884586316"/>
<attvalue for="betweenesscentrality" value="0.005763628703236008"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.001661823443159176"/>
</attvalues>
<viz:size value="114.383865"/>
<viz:position x="10498.317" y="-11376.716"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="455" label="455">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.002221310230656113"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="6.942841601679691E-4"/>
</attvalues>
<viz:size value="69.21842"/>
<viz:position x="11138.03" y="-10035.105"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="456" label="456">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.3905846621108717"/>
<attvalue for="betweenesscentrality" value="0.0019765926852347445"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="6.388248665951905E-4"/>
</attvalues>
<viz:size value="66.62955"/>
<viz:position x="7380.777" y="-13197.511"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="457" label="457">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.0041401696044195925"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.001298174115464464"/>
</attvalues>
<viz:size value="97.40845"/>
<viz:position x="10308.894" y="-10851.679"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="458" label="458">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="21"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.003725249849328192"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="0.001241129631222324"/>
</attvalues>
<viz:size value="94.74556"/>
<viz:position x="3302.7559" y="-4706.431"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="459" label="459">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.0025242011690491495"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="9.024764992379039E-4"/>
</attvalues>
<viz:size value="78.937"/>
<viz:position x="1837.9755" y="-2374.705"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="460" label="460">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.003321449381201555"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="9.813214591214865E-4"/>
</attvalues>
<viz:size value="82.61754"/>
<viz:position x="4531.103" y="-4743.596"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="461" label="461">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0013278719070918396"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.46400539170318E-4"/>
</attvalues>
<viz:size value="62.31511"/>
<viz:position x="3501.0107" y="-2077.645"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="462" label="462">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="29"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.0027532588845828737"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="0.0011188518485791214"/>
</attvalues>
<viz:size value="89.03755"/>
<viz:position x="1433.273" y="-3061.598"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="463" label="463">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.391495823842079"/>
<attvalue for="betweenesscentrality" value="0.002088663623729426"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="8.273089337648342E-4"/>
</attvalues>
<viz:size value="75.428116"/>
<viz:position x="1555.1573" y="-4136.168"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="464" label="464">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="29"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503553"/>
<attvalue for="betweenesscentrality" value="0.003911532440422881"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="0.0012720732809247417"/>
</attvalues>
<viz:size value="96.19003"/>
<viz:position x="2387.482" y="-4347.684"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="465" label="465">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.39240698557328624"/>
<attvalue for="betweenesscentrality" value="0.002755947275103243"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="0.0010946938988086914"/>
</attvalues>
<viz:size value="87.90984"/>
<viz:position x="1727.4424" y="-5717.6816"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="466" label="466">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.0025909472002812518"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="9.186899009775967E-4"/>
</attvalues>
<viz:size value="79.69385"/>
<viz:position x="875.56366" y="4563.1846"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="467" label="467">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="9.945869989398729E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="3.9621401235820955E-4"/>
</attvalues>
<viz:size value="55.304295"/>
<viz:position x="-1366.9602" y="-13380.55"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="468" label="468">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0014779025825879333"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.76075216752508E-4"/>
</attvalues>
<viz:size value="63.700344"/>
<viz:position x="7842.927" y="6307.9897"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="469" label="469">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.38967350037966453"/>
<attvalue for="betweenesscentrality" value="9.946813936482155E-4"/>
<attvalue for="modularity_class" value="10"/>
<attvalue for="pageranks" value="3.798152300268213E-4"/>
</attvalues>
<viz:size value="54.538788"/>
<viz:position x="-11554.319" y="-1239.2275"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="470" label="470">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="9.482567369780953E-4"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="3.298967743245876E-4"/>
</attvalues>
<viz:size value="52.208557"/>
<viz:position x="-1951.6635" y="-1655.2207"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="471" label="471">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.39179954441914805"/>
<attvalue for="betweenesscentrality" value="0.00266677501402252"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="9.76957635493695E-4"/>
</attvalues>
<viz:size value="82.41383"/>
<viz:position x="4720.8745" y="529.02094"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="472" label="472">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380266"/>
<attvalue for="betweenesscentrality" value="0.0018902922222108133"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="5.921219698266451E-4"/>
</attvalues>
<viz:size value="64.44942"/>
<viz:position x="11162.184" y="-10897.665"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="473" label="473">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37669469709970826"/>
<attvalue for="harmonicclosnesscentrality" value="0.3933181473044935"/>
<attvalue for="betweenesscentrality" value="0.005788924638508787"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="0.0015978355226071534"/>
</attvalues>
<viz:size value="111.39685"/>
<viz:position x="-18929.092" y="-14458.88"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="474" label="474">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37669469709970826"/>
<attvalue for="harmonicclosnesscentrality" value="0.3933181473044935"/>
<attvalue for="betweenesscentrality" value="0.005472987878655735"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="0.001539618180117653"/>
</attvalues>
<viz:size value="108.67923"/>
<viz:position x="9646.31" y="-20106.414"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="475" label="475">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567336"/>
<attvalue for="betweenesscentrality" value="0.001947147255114625"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="6.25576678262428E-4"/>
</attvalues>
<viz:size value="66.01111"/>
<viz:position x="-1372.2516" y="4428.4087"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="476" label="476">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259546"/>
<attvalue for="betweenesscentrality" value="0.001052431858697546"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="3.4860477288080056E-4"/>
</attvalues>
<viz:size value="53.08186"/>
<viz:position x="2154.4475" y="-1725.9014"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="477" label="477">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.3893697798025954"/>
<attvalue for="betweenesscentrality" value="0.0010146738871345234"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="3.224456643397555E-4"/>
</attvalues>
<viz:size value="51.860733"/>
<viz:position x="-2786.925" y="5968.221"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="478" label="478">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.3893697798025954"/>
<attvalue for="betweenesscentrality" value="0.0010826777534099124"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="3.4269747609025786E-4"/>
</attvalues>
<viz:size value="52.806103"/>
<viz:position x="-3078.5815" y="5071.8755"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="479" label="479">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.3893697798025954"/>
<attvalue for="betweenesscentrality" value="9.928798716631814E-4"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="3.362901522396292E-4"/>
</attvalues>
<viz:size value="52.507004"/>
<viz:position x="3037.9954" y="-1695.5199"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="480" label="480">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0017617293390680092"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="6.165688306116961E-4"/>
</attvalues>
<viz:size value="65.590614"/>
<viz:position x="7175.0293" y="12024.252"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="481" label="481">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.391799544419148"/>
<attvalue for="betweenesscentrality" value="0.0033373860822389126"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="0.0010509113268455316"/>
</attvalues>
<viz:size value="85.86604"/>
<viz:position x="15673.33" y="-7598.6826"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="482" label="482">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.39149582384207887"/>
<attvalue for="betweenesscentrality" value="0.002748483821520747"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="9.02360815364837E-4"/>
</attvalues>
<viz:size value="78.931595"/>
<viz:position x="11403.177" y="-8568.687"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="483" label="483">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="25"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37669469709970826"/>
<attvalue for="harmonicclosnesscentrality" value="0.3933181473044934"/>
<attvalue for="betweenesscentrality" value="0.0046031691933196605"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0014376937569412934"/>
</attvalues>
<viz:size value="103.92132"/>
<viz:position x="7014.248" y="-9880.432"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="484" label="484">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3763072175552889"/>
<attvalue for="harmonicclosnesscentrality" value="0.3924069855732861"/>
<attvalue for="betweenesscentrality" value="0.002519527576517954"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="0.0011058757430533865"/>
</attvalues>
<viz:size value="88.431816"/>
<viz:position x="156.7977" y="5597.601"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="485" label="485">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="19"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.39149582384207887"/>
<attvalue for="betweenesscentrality" value="0.002127658669685954"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="8.261339924621995E-4"/>
</attvalues>
<viz:size value="75.37327"/>
<viz:position x="-13201.105" y="-1677.839"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="486" label="486">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0011909402561245108"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.09478599497615E-4"/>
</attvalues>
<viz:size value="65.25964"/>
<viz:position x="9736.623" y="7642.915"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="487" label="487">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0016322848855617959"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="6.290979633377122E-4"/>
</attvalues>
<viz:size value="66.17548"/>
<viz:position x="10413.324" y="14032.258"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="488" label="488">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0015425658606317185"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="6.032649811558704E-4"/>
</attvalues>
<viz:size value="64.96958"/>
<viz:position x="-14011.867" y="-1656.3895"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="489" label="489">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="21"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.002302722236025831"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="6.809295810538389E-4"/>
</attvalues>
<viz:size value="68.595024"/>
<viz:position x="1782.1349" y="-7680.957"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="490" label="490">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.002052346520296623"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="7.03520537175787E-4"/>
</attvalues>
<viz:size value="69.64958"/>
<viz:position x="-5392.2593" y="-15948.031"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="491" label="491">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.830882975517684E-4"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="3.7733215768137195E-4"/>
</attvalues>
<viz:size value="54.422874"/>
<viz:position x="-11662.855" y="-1856.1847"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="492" label="492">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="23"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3772125794810105"/>
<attvalue for="harmonicclosnesscentrality" value="0.3945330296127697"/>
<attvalue for="betweenesscentrality" value="0.005525943497607216"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="0.0016999965841928736"/>
</attvalues>
<viz:size value="116.16581"/>
<viz:position x="-8265.326" y="5284.3267"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="493" label="493">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.001328329392579976"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.127236842089973E-4"/>
</attvalues>
<viz:size value="60.74305"/>
<viz:position x="-5095.424" y="7278.318"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="494" label="494">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794074"/>
<attvalue for="betweenesscentrality" value="0.0018422161058382796"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="7.354982307832128E-4"/>
</attvalues>
<viz:size value="71.14233"/>
<viz:position x="-268.8021" y="-13778.626"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="495" label="495">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621706"/>
<attvalue for="betweenesscentrality" value="0.0033115347337298413"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="0.0011076202428138974"/>
</attvalues>
<viz:size value="88.51326"/>
<viz:position x="-3644.5256" y="-13693.521"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="496" label="496">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="33"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3765654486189741"/>
<attvalue for="harmonicclosnesscentrality" value="0.39301442672742426"/>
<attvalue for="betweenesscentrality" value="0.0035657699020972457"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="0.0012543013888709638"/>
</attvalues>
<viz:size value="95.36043"/>
<viz:position x="-4388.2925" y="-1313.2369"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="497" label="497">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="30"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.39149582384207887"/>
<attvalue for="betweenesscentrality" value="0.002958586127630124"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="9.775575415645215E-4"/>
</attvalues>
<viz:size value="82.44183"/>
<viz:position x="-1481.0707" y="-16228.999"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="498" label="498">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001065684703716883"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.630760277112009E-4"/>
</attvalues>
<viz:size value="58.42546"/>
<viz:position x="-1601.288" y="-14754.004"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="499" label="499">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.39149582384207887"/>
<attvalue for="betweenesscentrality" value="0.0019326194574048976"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="8.199487301989922E-4"/>
</attvalues>
<viz:size value="75.08453"/>
<viz:position x="-5115.818" y="-5597.8345"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="500" label="500">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3765654486189741"/>
<attvalue for="harmonicclosnesscentrality" value="0.39301442672742426"/>
<attvalue for="betweenesscentrality" value="0.0038311016131106356"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="0.0012555779615573003"/>
</attvalues>
<viz:size value="95.42003"/>
<viz:position x="-18934.486" y="1715.0668"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="501" label="501">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0010775974870660245"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.58262923219586E-4"/>
</attvalues>
<viz:size value="58.200783"/>
<viz:position x="-11262.414" y="-721.0819"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="502" label="502">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="21"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621706"/>
<attvalue for="betweenesscentrality" value="0.0023825429928811316"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="9.836788522598417E-4"/>
</attvalues>
<viz:size value="82.72758"/>
<viz:position x="-6067.453" y="-1974.2826"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="503" label="503">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.391799544419148"/>
<attvalue for="betweenesscentrality" value="0.0030315602272235"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0010071111471461097"/>
</attvalues>
<viz:size value="83.82141"/>
<viz:position x="6733.9067" y="-5921.642"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="504" label="504">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.0016048273194940181"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="7.428140535221796E-4"/>
</attvalues>
<viz:size value="71.48383"/>
<viz:position x="-1685.7384" y="317.8937"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="505" label="505">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0014031305974581902"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.97843816281401E-4"/>
</attvalues>
<viz:size value="64.71652"/>
<viz:position x="-5687.1855" y="7951.7407"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="506" label="506">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503552"/>
<attvalue for="betweenesscentrality" value="0.003049778040362464"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="0.001171750476977105"/>
</attvalues>
<viz:size value="91.5069"/>
<viz:position x="2045.8962" y="3983.3875"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="507" label="507">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0014812546341605435"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.696800447602484E-4"/>
</attvalues>
<viz:size value="63.401814"/>
<viz:position x="7937.9014" y="6283.43"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="508" label="508">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001595345491369525"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.481965361640871E-4"/>
</attvalues>
<viz:size value="62.39895"/>
<viz:position x="9309.112" y="5959.969"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="509" label="509">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.0022808944475242276"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="8.304636977489602E-4"/>
</attvalues>
<viz:size value="75.575386"/>
<viz:position x="-6342.069" y="-98.195885"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="510" label="510">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37617823479005996"/>
<attvalue for="harmonicclosnesscentrality" value="0.39210326499621706"/>
<attvalue for="betweenesscentrality" value="0.0023611885270887942"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="9.806038456363511E-4"/>
</attvalues>
<viz:size value="82.58403"/>
<viz:position x="-1990.96" y="-974.89746"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="511" label="511">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="19"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.391799544419148"/>
<attvalue for="betweenesscentrality" value="0.003514569598220084"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="0.0010453048482360653"/>
</attvalues>
<viz:size value="85.604324"/>
<viz:position x="-5234.239" y="-2739.6765"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="512" label="512">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="21"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37669469709970826"/>
<attvalue for="harmonicclosnesscentrality" value="0.3933181473044934"/>
<attvalue for="betweenesscentrality" value="0.004170984108984668"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="0.0013924650766794638"/>
</attvalues>
<viz:size value="101.81001"/>
<viz:position x="8449.354" y="13718.688"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="513" label="513">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0013594807130226161"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.540716841771634E-4"/>
</attvalues>
<viz:size value="62.673206"/>
<viz:position x="8508.959" y="7753.8135"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="514" label="514">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0011051278933081696"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="4.752411481174909E-4"/>
</attvalues>
<viz:size value="58.99334"/>
<viz:position x="7221.536" y="7706.193"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="515" label="515">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.002030217800926919"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="8.188305639426312E-4"/>
</attvalues>
<viz:size value="75.03234"/>
<viz:position x="8142.2456" y="8038.46"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="516" label="516">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.929690461422625E-4"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="3.7228246409082955E-4"/>
</attvalues>
<viz:size value="54.187153"/>
<viz:position x="-1.9450022" y="-836.9679"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="517" label="517">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0012299646860681347"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="4.563598821039046E-4"/>
</attvalues>
<viz:size value="58.111946"/>
<viz:position x="-51.852325" y="9189.821"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="518" label="518">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0013161444600159886"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="5.066550035961712E-4"/>
</attvalues>
<viz:size value="60.459763"/>
<viz:position x="703.83575" y="7840.1553"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="519" label="519">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794074"/>
<attvalue for="betweenesscentrality" value="0.0027710658734453617"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="8.401059964419921E-4"/>
</attvalues>
<viz:size value="76.02549"/>
<viz:position x="758.2781" y="7275.395"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="520" label="520">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0012289341782488453"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.262244453528688E-4"/>
</attvalues>
<viz:size value="56.705204"/>
<viz:position x="-9272.503" y="-1122.1584"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="521" label="521">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.002168513507415415"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="7.878820999207404E-4"/>
</attvalues>
<viz:size value="73.58765"/>
<viz:position x="-5866.317" y="-8574.301"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="522" label="522">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="97"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3786441262722098"/>
<attvalue for="harmonicclosnesscentrality" value="0.39787395596052955"/>
<attvalue for="betweenesscentrality" value="0.01224279663448007"/>
<attvalue for="modularity_class" value="10"/>
<attvalue for="pageranks" value="0.0031115630372593366"/>
</attvalues>
<viz:size value="182.05875"/>
<viz:position x="-24218.75" y="15490.914"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="523" label="523">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.696083348030457E-4"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="3.673909767543005E-4"/>
</attvalues>
<viz:size value="53.958813"/>
<viz:position x="-11876.994" y="-1541.583"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="524" label="524">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.0021284834059524618"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="8.230070423188979E-4"/>
</attvalues>
<viz:size value="75.2273"/>
<viz:position x="-586.1464" y="-13803.325"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="525" label="525">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.0022191990505033257"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="8.185755756334456E-4"/>
</attvalues>
<viz:size value="75.02044"/>
<viz:position x="-1409.1515" y="-11966.885"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="526" label="526">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0012806615852121092"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.009776206473752E-4"/>
</attvalues>
<viz:size value="60.194733"/>
<viz:position x="-962.31616" y="-13082.705"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="527" label="527">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0012806615852121092"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.009776206473752E-4"/>
</attvalues>
<viz:size value="60.194733"/>
<viz:position x="-942.59033" y="-13226.227"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="528" label="528">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0012806615852121092"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.009776206473752E-4"/>
</attvalues>
<viz:size value="60.194733"/>
<viz:position x="-1004.1604" y="-13183.717"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="529" label="529">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.001141414702417489"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.072204371447268E-4"/>
</attvalues>
<viz:size value="55.81808"/>
<viz:position x="-7858.435" y="-144.92537"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="530" label="530">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552623"/>
<attvalue for="betweenesscentrality" value="1.511685573296703E-5"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.4787059146627389E-4"/>
</attvalues>
<viz:size value="43.711445"/>
<viz:position x="2108.103" y="-1351.0889"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="531" label="531">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0013156479764074318"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.539469122542524E-4"/>
</attvalues>
<viz:size value="62.66738"/>
<viz:position x="-1065.2482" y="-14544.132"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="532" label="532">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0017744500713136662"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="6.514321760590551E-4"/>
</attvalues>
<viz:size value="67.21806"/>
<viz:position x="-2927.102" y="-13617.582"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="533" label="533">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="7.749497404451829E-4"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="5.067675996417568E-4"/>
</attvalues>
<viz:size value="60.46502"/>
<viz:position x="13998.373" y="-7189.3535"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="534" label="534">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37682403433476397"/>
<attvalue for="harmonicclosnesscentrality" value="0.39362186788156245"/>
<attvalue for="betweenesscentrality" value="0.004735527035040309"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0014594724639991489"/>
</attvalues>
<viz:size value="104.937965"/>
<viz:position x="-6094.777" y="257.39166"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="535" label="535">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0017031869588741188"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="5.647235468107356E-4"/>
</attvalues>
<viz:size value="63.17044"/>
<viz:position x="-1940.8904" y="-10146.612"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="536" label="536">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0011909402561245108"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.09478599497615E-4"/>
</attvalues>
<viz:size value="65.25964"/>
<viz:position x="9602.155" y="7671.6343"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="537" label="537">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0014142061930307297"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="4.716605798398844E-4"/>
</attvalues>
<viz:size value="58.826195"/>
<viz:position x="-570.3471" y="-511.87134"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="538" label="538">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0015941277897843365"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.514578841326774E-4"/>
</attvalues>
<viz:size value="62.55119"/>
<viz:position x="2383.5022" y="-3132.821"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="539" label="539">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0012169862867334896"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.804637303832308E-4"/>
</attvalues>
<viz:size value="63.905205"/>
<viz:position x="-4908.56" y="7420.5234"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="540" label="540">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.001190940256124511"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.09478599497615E-4"/>
</attvalues>
<viz:size value="65.25964"/>
<viz:position x="9499.026" y="7585.1206"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="541" label="541">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552623"/>
<attvalue for="betweenesscentrality" value="9.111617312072891E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.557573547843128E-4"/>
</attvalues>
<viz:size value="58.08382"/>
<viz:position x="306.69177" y="1392.775"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="542" label="542">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.719510870326153E-4"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.017281619204016E-4"/>
</attvalues>
<viz:size value="55.5617"/>
<viz:position x="9006.787" y="4874.7036"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="543" label="543">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.00193647204545567"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="6.460041276570671E-4"/>
</attvalues>
<viz:size value="66.964676"/>
<viz:position x="-9455.302" y="-1124.2675"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="544" label="544">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0013042261470929996"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.958912755919748E-4"/>
</attvalues>
<viz:size value="59.957302"/>
<viz:position x="-9852.165" y="-861.00165"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="545" label="545">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503552"/>
<attvalue for="betweenesscentrality" value="0.0032313527686471297"/>
<attvalue for="modularity_class" value="10"/>
<attvalue for="pageranks" value="0.001163605607241198"/>
</attvalues>
<viz:size value="91.126686"/>
<viz:position x="-7005.2485" y="-2367.93"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="546" label="546">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0010639324553200619"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.6010571573100234E-4"/>
</attvalues>
<viz:size value="58.286804"/>
<viz:position x="-10047.238" y="-1675.93"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="547" label="547">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0011590825190101852"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.5484357842298954E-4"/>
</attvalues>
<viz:size value="58.041164"/>
<viz:position x="-5641.3345" y="6980.7383"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="548" label="548">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0020696803097586826"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="6.575466644108452E-4"/>
</attvalues>
<viz:size value="67.503494"/>
<viz:position x="-2868.3071" y="3917.7283"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="549" label="549">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="21"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794074"/>
<attvalue for="betweenesscentrality" value="0.0024526445617237347"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="7.893568917292103E-4"/>
</attvalues>
<viz:size value="73.65649"/>
<viz:position x="9791.003" y="11481.107"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="550" label="550">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001063932455320062"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.6010571573100234E-4"/>
</attvalues>
<viz:size value="58.286804"/>
<viz:position x="-10077.261" y="-1786.6365"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="551" label="551">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0012986439904614784"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.8263228380046586E-4"/>
</attvalues>
<viz:size value="59.338364"/>
<viz:position x="-12124.444" y="-2140.989"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="552" label="552">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0012221235705252116"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="5.302489873169466E-4"/>
</attvalues>
<viz:size value="61.561142"/>
<viz:position x="13002.11" y="-8193.946"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="553" label="553">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.391799544419148"/>
<attvalue for="betweenesscentrality" value="0.0028755480168281187"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="9.941958622954818E-4"/>
</attvalues>
<viz:size value="83.21852"/>
<viz:position x="15388.931" y="-7018.641"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="554" label="554">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.391799544419148"/>
<attvalue for="betweenesscentrality" value="0.0028755480168281187"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="9.941958622954818E-4"/>
</attvalues>
<viz:size value="83.21852"/>
<viz:position x="15406.3955" y="-6878.673"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="555" label="555">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0011590825190101852"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.5484357842298954E-4"/>
</attvalues>
<viz:size value="58.041164"/>
<viz:position x="-5707.096" y="6933.9883"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="556" label="556">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.39149582384207887"/>
<attvalue for="betweenesscentrality" value="0.0021719182782904208"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="8.548074987174012E-4"/>
</attvalues>
<viz:size value="76.71177"/>
<viz:position x="3359.0134" y="-11691.467"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="557" label="557">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0014323900285564768"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.913584474707861E-4"/>
</attvalues>
<viz:size value="64.41378"/>
<viz:position x="-16293.333" y="1430.0306"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="558" label="558">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="22"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.00142523576594289"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="4.5109229623739653E-4"/>
</attvalues>
<viz:size value="57.86605"/>
<viz:position x="5357.627" y="-13528.099"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="559" label="559">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0010992263261056293"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.591148186257071E-4"/>
</attvalues>
<viz:size value="62.908623"/>
<viz:position x="3298.631" y="7222.2236"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="560" label="560">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="0.001354782792447672"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="4.514822271392604E-4"/>
</attvalues>
<viz:size value="57.884254"/>
<viz:position x="861.00305" y="5852.5815"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="561" label="561">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.002062648444777377"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="7.103598340236787E-4"/>
</attvalues>
<viz:size value="69.96885"/>
<viz:position x="712.9373" y="7461.2095"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="562" label="562">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0013820294515545163"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="4.37903490222828E-4"/>
</attvalues>
<viz:size value="57.25039"/>
<viz:position x="4009.5022" y="2830.7417"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="563" label="563">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552623"/>
<attvalue for="betweenesscentrality" value="9.111617312072891E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.557573547843128E-4"/>
</attvalues>
<viz:size value="58.08382"/>
<viz:position x="769.43896" y="1646.2979"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="564" label="564">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552623"/>
<attvalue for="betweenesscentrality" value="9.111617312072891E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.557573547843128E-4"/>
</attvalues>
<viz:size value="58.08382"/>
<viz:position x="436.30322" y="1480.4178"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="565" label="565">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="24"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3770829754337743"/>
<attvalue for="harmonicclosnesscentrality" value="0.3942293090357006"/>
<attvalue for="betweenesscentrality" value="0.003619921501525382"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="0.0015478440885774237"/>
</attvalues>
<viz:size value="109.063225"/>
<viz:position x="-3456.9353" y="1130.4471"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="566" label="566">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0015660246737714387"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.642342002897039E-4"/>
</attvalues>
<viz:size value="63.1476"/>
<viz:position x="3039.8037" y="-2535.695"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="567" label="567">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.001758127139886367"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="6.443789225384338E-4"/>
</attvalues>
<viz:size value="66.88881"/>
<viz:position x="3861.9756" y="-3231.4058"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="568" label="568">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0017031723328283148"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="6.486678667583487E-4"/>
</attvalues>
<viz:size value="67.08902"/>
<viz:position x="4704.2905" y="-1411.178"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="569" label="569">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552623"/>
<attvalue for="betweenesscentrality" value="1.511685573296703E-5"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.4787059146627389E-4"/>
</attvalues>
<viz:size value="43.711445"/>
<viz:position x="2082.85" y="-1218.5737"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="570" label="570">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="21"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3765654486189741"/>
<attvalue for="harmonicclosnesscentrality" value="0.39301442672742426"/>
<attvalue for="betweenesscentrality" value="0.0039396171281172775"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="0.00130016348741602"/>
</attvalues>
<viz:size value="97.50131"/>
<viz:position x="328.94043" y="-6869.7695"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="571" label="571">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0010145355038240214"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.905584140812428E-4"/>
</attvalues>
<viz:size value="55.040287"/>
<viz:position x="8506.386" y="5840.991"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="572" label="572">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0010633958426183728"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.711476178085235E-4"/>
</attvalues>
<viz:size value="58.80225"/>
<viz:position x="-1360.1339" y="-13665.821"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="573" label="573">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.391799544419148"/>
<attvalue for="betweenesscentrality" value="0.0031429844758967857"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="9.953237158293988E-4"/>
</attvalues>
<viz:size value="83.271164"/>
<viz:position x="3355.6958" y="-10188.248"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="574" label="574">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.391799544419148"/>
<attvalue for="betweenesscentrality" value="0.0027226832673615427"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="9.595420694084059E-4"/>
</attvalues>
<viz:size value="81.60085"/>
<viz:position x="-2126.8203" y="-3910.1353"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="575" label="575">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.39149582384207887"/>
<attvalue for="betweenesscentrality" value="0.0033343090383124913"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="9.487654244223737E-4"/>
</attvalues>
<viz:size value="81.09779"/>
<viz:position x="-2116.6682" y="-6235.7505"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="576" label="576">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794074"/>
<attvalue for="betweenesscentrality" value="0.0014753902484568426"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="6.406087166104695E-4"/>
</attvalues>
<viz:size value="66.712814"/>
<viz:position x="-2324.9688" y="-1602.3121"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="577" label="577">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0011312122977957355"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.930472641595095E-4"/>
</attvalues>
<viz:size value="59.82454"/>
<viz:position x="-4774.3433" y="7030.831"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="578" label="578">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0019865220379992276"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="6.619742779037284E-4"/>
</attvalues>
<viz:size value="67.710175"/>
<viz:position x="-3723.2827" y="-5943.8325"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="579" label="579">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0018424682762465964"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="5.448693948830254E-4"/>
</attvalues>
<viz:size value="62.243637"/>
<viz:position x="-3209.262" y="-1105.538"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="580" label="580">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0014086188080869046"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="5.092103169466629E-4"/>
</attvalues>
<viz:size value="60.579044"/>
<viz:position x="-814.5703" y="196.60867"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="581" label="581">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.002106337181277907"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="6.670460621124184E-4"/>
</attvalues>
<viz:size value="67.94693"/>
<viz:position x="-2293.8442" y="-6539.658"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="582" label="582">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0024515215970342097"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="6.771690514872732E-4"/>
</attvalues>
<viz:size value="68.41948"/>
<viz:position x="12210.37" y="12796.716"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="583" label="583">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="9.460172878531674E-4"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="3.567692930267632E-4"/>
</attvalues>
<viz:size value="53.462986"/>
<viz:position x="4811.2466" y="2137.5974"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="584" label="584">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0011739281220678223"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="4.7612519676197763E-4"/>
</attvalues>
<viz:size value="59.034607"/>
<viz:position x="4931.478" y="2015.4581"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="585" label="585">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.946813936482135E-4"/>
<attvalue for="modularity_class" value="10"/>
<attvalue for="pageranks" value="3.798152300268213E-4"/>
</attvalues>
<viz:size value="54.538788"/>
<viz:position x="-11333.179" y="-1272.3329"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="586" label="586">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.0021667637862752337"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="7.673789329214313E-4"/>
</attvalues>
<viz:size value="72.63054"/>
<viz:position x="-17424.168" y="1710.4337"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="587" label="587">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0018503509002633209"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.73552107767094E-4"/>
</attvalues>
<viz:size value="68.25064"/>
<viz:position x="10269.221" y="8733.567"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="588" label="588">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0021285396563049622"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="6.618482315352001E-4"/>
</attvalues>
<viz:size value="67.70429"/>
<viz:position x="10853.713" y="4337.7607"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="589" label="589">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0017323231007408903"/>
<attvalue for="modularity_class" value="17"/>
<attvalue for="pageranks" value="6.163444364931374E-4"/>
</attvalues>
<viz:size value="65.58014"/>
<viz:position x="3422.1948" y="13136.163"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="590" label="590">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0011573441585868644"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.073097425637008E-4"/>
</attvalues>
<viz:size value="65.158394"/>
<viz:position x="9602.366" y="8101.939"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="591" label="591">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794074"/>
<attvalue for="betweenesscentrality" value="0.0022162126586946188"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="7.357405639969173E-4"/>
</attvalues>
<viz:size value="71.15364"/>
<viz:position x="10147.898" y="14237.618"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="592" label="592">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001530813312619614"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.463128931675312E-4"/>
</attvalues>
<viz:size value="62.31102"/>
<viz:position x="-1735.904" y="-12974.639"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="593" label="593">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="35"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.0033486171911379947"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="9.303408456150854E-4"/>
</attvalues>
<viz:size value="80.23772"/>
<viz:position x="-17396.951" y="-12897.808"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="594" label="594">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001063932455320062"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.6010571573100234E-4"/>
</attvalues>
<viz:size value="58.286804"/>
<viz:position x="-9921.141" y="-1854.6266"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="595" label="595">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="43"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3765654486189741"/>
<attvalue for="harmonicclosnesscentrality" value="0.39301442672742426"/>
<attvalue for="betweenesscentrality" value="0.0052547500166675995"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="0.0014891219581761122"/>
</attvalues>
<viz:size value="106.32202"/>
<viz:position x="-18364.357" y="-13227.265"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="596" label="596">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001063932455320062"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.6010571573100234E-4"/>
</attvalues>
<viz:size value="58.286804"/>
<viz:position x="-10110.837" y="-1862.2012"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="597" label="597">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="9.27213135452783E-4"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="3.470255991667876E-4"/>
</attvalues>
<viz:size value="53.008144"/>
<viz:position x="251.46002" y="8304.3955"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="598" label="598">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0020455061880214273"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="7.562065398711304E-4"/>
</attvalues>
<viz:size value="72.10901"/>
<viz:position x="4313.596" y="12777.234"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="599" label="599">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.39149582384207887"/>
<attvalue for="betweenesscentrality" value="0.00363314381603354"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="0.0010964042261454793"/>
</attvalues>
<viz:size value="87.989685"/>
<viz:position x="13679.967" y="6350.516"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="600" label="600">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.002358595325237353"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="6.924768659824911E-4"/>
</attvalues>
<viz:size value="69.134056"/>
<viz:position x="13832.605" y="6439.4116"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="601" label="601">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="9.796509198855875E-4"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="3.675637724057787E-4"/>
</attvalues>
<viz:size value="53.96688"/>
<viz:position x="5719.3184" y="-930.5947"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="602" label="602">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0016047405338873077"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="6.228979770117217E-4"/>
</attvalues>
<viz:size value="65.88606"/>
<viz:position x="-1699.0599" y="-13090.499"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="603" label="603">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.39149582384207887"/>
<attvalue for="betweenesscentrality" value="0.0018215344210451173"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="8.800134310195578E-4"/>
</attvalues>
<viz:size value="77.8884"/>
<viz:position x="9786.681" y="8926.47"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="604" label="604">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794074"/>
<attvalue for="betweenesscentrality" value="0.0013904063662446486"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.960102285282937E-4"/>
</attvalues>
<viz:size value="69.298996"/>
<viz:position x="10041.803" y="8292.628"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="605" label="605">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0014331356916440327"/>
<attvalue for="modularity_class" value="17"/>
<attvalue for="pageranks" value="5.072190821779066E-4"/>
</attvalues>
<viz:size value="60.48609"/>
<viz:position x="3365.3027" y="12848.566"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="606" label="606">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.001386377012997963"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="5.503340496512493E-4"/>
</attvalues>
<viz:size value="62.49873"/>
<viz:position x="-3095.8845" y="-3711.6326"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="607" label="607">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.001176085915098155"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.515335547228428E-4"/>
</attvalues>
<viz:size value="62.55472"/>
<viz:position x="-1136.7164" y="-14049.67"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="608" label="608">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0012639077320917508"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.212311199044729E-4"/>
</attvalues>
<viz:size value="61.140182"/>
<viz:position x="-525.33575" y="-766.8637"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="609" label="609">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37604934041459653"/>
<attvalue for="harmonicclosnesscentrality" value="0.391799544419148"/>
<attvalue for="betweenesscentrality" value="0.0016767813565886107"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="8.693022894290787E-4"/>
</attvalues>
<viz:size value="77.3884"/>
<viz:position x="-911.75287" y="-997.44226"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="610" label="610">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="30"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.0029989765986963707"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="9.184474469007955E-4"/>
</attvalues>
<viz:size value="79.682526"/>
<viz:position x="-9335.588" y="15278.773"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="611" label="611">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794074"/>
<attvalue for="betweenesscentrality" value="0.0026929481160049363"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="8.167325181685353E-4"/>
</attvalues>
<viz:size value="74.9344"/>
<viz:position x="-9221.59" y="15073.367"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="612" label="612">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0015947352554297287"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="6.182596362707618E-4"/>
</attvalues>
<viz:size value="65.66954"/>
<viz:position x="-5165.4097" y="3640.0217"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="613" label="613">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.001656706354164296"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.731718326071144E-4"/>
</attvalues>
<viz:size value="63.56481"/>
<viz:position x="-6784.3413" y="9535.862"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="614" label="614">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0018854066396490647"/>
<attvalue for="modularity_class" value="17"/>
<attvalue for="pageranks" value="6.306593482453456E-4"/>
</attvalues>
<viz:size value="66.24837"/>
<viz:position x="3254.4495" y="13298.215"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="615" label="615">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0013723803139087918"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.51071331687824E-4"/>
</attvalues>
<viz:size value="57.865074"/>
<viz:position x="8330.611" y="5322.752"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="616" label="616">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001077493585448209"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="4.3802728051679557E-4"/>
</attvalues>
<viz:size value="57.256165"/>
<viz:position x="-8312.27" y="-3036.0503"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="617" label="617">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="25"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3770829754337743"/>
<attvalue for="harmonicclosnesscentrality" value="0.3942293090357006"/>
<attvalue for="betweenesscentrality" value="0.006544520380900237"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="0.0017602561653148354"/>
</attvalues>
<viz:size value="118.97877"/>
<viz:position x="-13203.014" y="12138.694"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="618" label="618">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0021049434940693793"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="6.72760227251055E-4"/>
</attvalues>
<viz:size value="68.21367"/>
<viz:position x="-7107.9517" y="9806.185"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="619" label="619">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.39149582384207887"/>
<attvalue for="betweenesscentrality" value="0.002903024987951213"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="9.238575461635804E-4"/>
</attvalues>
<viz:size value="79.935074"/>
<viz:position x="-7217.1484" y="9536.354"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="620" label="620">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.39149582384207887"/>
<attvalue for="betweenesscentrality" value="0.002711322570087421"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="8.834450121076502E-4"/>
</attvalues>
<viz:size value="78.048584"/>
<viz:position x="-5422.256" y="3667.9985"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="621" label="621">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.929690461422623E-4"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="3.7228246409082955E-4"/>
</attvalues>
<viz:size value="54.187153"/>
<viz:position x="9.471782" y="-728.0433"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="622" label="622">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.001760742839065447"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="6.470465756040676E-4"/>
</attvalues>
<viz:size value="67.013336"/>
<viz:position x="6637.892" y="12232.3125"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="623" label="623">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="84"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0025978399228341583"/>
<attvalue for="modularity_class" value="17"/>
<attvalue for="pageranks" value="8.200999164579844E-4"/>
</attvalues>
<viz:size value="75.0916"/>
<viz:position x="2780.379" y="13927.409"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="624" label="624">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.002003662908451478"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="8.06033798726023E-4"/>
</attvalues>
<viz:size value="74.43498"/>
<viz:position x="10336.147" y="14257.705"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="625" label="625">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3759205343380716"/>
<attvalue for="harmonicclosnesscentrality" value="0.39149582384207887"/>
<attvalue for="betweenesscentrality" value="0.002464137483016446"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="9.114277622054109E-4"/>
</attvalues>
<viz:size value="79.35484"/>
<viz:position x="10679.102" y="14179.941"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="626" label="626">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.002052346520296625"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="7.03520537175787E-4"/>
</attvalues>
<viz:size value="69.64958"/>
<viz:position x="-5352.7993" y="-16075.354"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="627" label="627">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.002873293235250691"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="8.950863444921637E-4"/>
</attvalues>
<viz:size value="78.59202"/>
<viz:position x="-1619.7046" y="-16345.385"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="628" label="628">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.002052346520296625"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="7.03520537175787E-4"/>
</attvalues>
<viz:size value="69.64958"/>
<viz:position x="-5480.115" y="-16059.635"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="629" label="629">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0013921496931794675"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.096307088277418E-4"/>
</attvalues>
<viz:size value="60.598667"/>
<viz:position x="3297.9653" y="3177.9282"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="630" label="630">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794074"/>
<attvalue for="betweenesscentrality" value="0.0018169766127802867"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="6.768671324006237E-4"/>
</attvalues>
<viz:size value="68.40539"/>
<viz:position x="-17207.031" y="1710.1678"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="631" label="631">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.720436308389949E-4"/>
<attvalue for="modularity_class" value="10"/>
<attvalue for="pageranks" value="3.723930156904433E-4"/>
</attvalues>
<viz:size value="54.192314"/>
<viz:position x="-8305.927" y="-2609.6208"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="632" label="632">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0013611886969482519"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.779729040550189E-4"/>
</attvalues>
<viz:size value="63.788933"/>
<viz:position x="-17107.076" y="1447.8364"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="633" label="633">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0017752069219311728"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.502388081764266E-4"/>
</attvalues>
<viz:size value="67.16235"/>
<viz:position x="10355.74" y="8629.807"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="634" label="634">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0010775974870660269"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.58262923219586E-4"/>
</attvalues>
<viz:size value="58.200783"/>
<viz:position x="-11129.48" y="-749.96747"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="635" label="635">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0015223590861132654"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="5.28799109295955E-4"/>
</attvalues>
<viz:size value="61.49346"/>
<viz:position x="10471.388" y="4447.171"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="636" label="636">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0013665494696624696"/>
<attvalue for="modularity_class" value="10"/>
<attvalue for="pageranks" value="4.3034435671300526E-4"/>
</attvalues>
<viz:size value="56.897522"/>
<viz:position x="-11261.147" y="-2332.8787"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="637" label="637">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="26"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37747205503009457"/>
<attvalue for="harmonicclosnesscentrality" value="0.39514047076690784"/>
<attvalue for="betweenesscentrality" value="0.006912846464402897"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="0.0019684255718802742"/>
</attvalues>
<viz:size value="128.69626"/>
<viz:position x="-12814.764" y="11844.616"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="638" label="638">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0015492843743667704"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="5.310072495313049E-4"/>
</attvalues>
<viz:size value="61.596542"/>
<viz:position x="816.32733" y="7747.202"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="639" label="639">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0014097081124837215"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="6.194685980965114E-4"/>
</attvalues>
<viz:size value="65.72598"/>
<viz:position x="14271.116" y="-7179.9897"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="640" label="640">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.945869989398785E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="3.9621401235820955E-4"/>
</attvalues>
<viz:size value="55.304295"/>
<viz:position x="-1288.5079" y="-13450.789"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="641" label="641">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="9.272131354527828E-4"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="3.470255991667876E-4"/>
</attvalues>
<viz:size value="53.008144"/>
<viz:position x="398.5481" y="8622.405"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="642" label="642">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0013293867271542807"/>
<attvalue for="modularity_class" value="10"/>
<attvalue for="pageranks" value="5.404369718565456E-4"/>
</attvalues>
<viz:size value="62.036728"/>
<viz:position x="-11968.145" y="-407.5929"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="643" label="643">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0014766120328597134"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="5.877105454824476E-4"/>
</attvalues>
<viz:size value="64.24349"/>
<viz:position x="5281.511" y="1870.9517"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="644" label="644">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.967916277141347E-4"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="3.86955964388618E-4"/>
</attvalues>
<viz:size value="54.872124"/>
<viz:position x="-11495.792" y="-2080.3066"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="645" label="645">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.7758296993431E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="3.8893968011762664E-4"/>
</attvalues>
<viz:size value="54.96472"/>
<viz:position x="-1665.9552" y="-13768.233"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="646" label="646">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0012636867156223487"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.6713748534892046E-4"/>
</attvalues>
<viz:size value="58.61505"/>
<viz:position x="9572.373" y="3527.5588"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="647" label="647">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0014294779421134356"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.32659959839972E-4"/>
</attvalues>
<viz:size value="61.67369"/>
<viz:position x="8515.887" y="5218.148"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="648" label="648">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0015579481448076415"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.814531541074502E-4"/>
</attvalues>
<viz:size value="63.951393"/>
<viz:position x="7660.5728" y="8243.727"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="649" label="649">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.778858100491774E-4"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.8963929894484495E-4"/>
</attvalues>
<viz:size value="54.997383"/>
<viz:position x="8243.815" y="6687.081"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="650" label="650">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="7.857844963687536E-4"/>
<attvalue for="modularity_class" value="8"/>
<attvalue for="pageranks" value="4.302338729475648E-4"/>
</attvalues>
<viz:size value="56.892365"/>
<viz:position x="-15687.56" y="-10520.105"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="651" label="651">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.002280636923429185"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="7.585046669886993E-4"/>
</attvalues>
<viz:size value="72.21628"/>
<viz:position x="10699.058" y="4395.35"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="652" label="652">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="132"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.003087678595440875"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="9.770448325454285E-4"/>
</attvalues>
<viz:size value="82.41789"/>
<viz:position x="5147.887" y="9966.789"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="653" label="653">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0012049591653174403"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="3.961734784950206E-4"/>
</attvalues>
<viz:size value="55.302402"/>
<viz:position x="-6870.749" y="-1074.4489"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="654" label="654">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001563295159756587"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.658385622273778E-4"/>
</attvalues>
<viz:size value="63.222492"/>
<viz:position x="-816.53186" y="-11630.635"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="655" label="655">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0012630255139849907"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.310028812178459E-4"/>
</attvalues>
<viz:size value="61.596336"/>
<viz:position x="12212.587" y="3035.9595"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="656" label="656">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0017187434791520358"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.522955945438036E-4"/>
</attvalues>
<viz:size value="67.25837"/>
<viz:position x="12314.59" y="3131.8633"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="657" label="657">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0018177567763203218"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.927231833742193E-4"/>
</attvalues>
<viz:size value="69.14555"/>
<viz:position x="12325.546" y="2831.2979"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="658" label="658">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0011324101602339903"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="4.8419051051743487E-4"/>
</attvalues>
<viz:size value="59.411102"/>
<viz:position x="8743.635" y="6697.109"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="659" label="659">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0016567063541642962"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="5.731718326071144E-4"/>
</attvalues>
<viz:size value="63.56481"/>
<viz:position x="-6706.243" y="9571.521"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="660" label="660">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0015469112831754336"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.641040496594121E-4"/>
</attvalues>
<viz:size value="63.14152"/>
<viz:position x="9515.713" y="4566.2188"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="661" label="661">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0012313773019898922"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.4995868663529125E-4"/>
</attvalues>
<viz:size value="57.813133"/>
<viz:position x="-5823.27" y="6611.214"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="662" label="662">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0014838432034731937"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.106817558436748E-4"/>
</attvalues>
<viz:size value="60.64773"/>
<viz:position x="8803.691" y="5648.3257"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="663" label="663">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0016861114716524904"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="6.421998251444762E-4"/>
</attvalues>
<viz:size value="66.78709"/>
<viz:position x="14091.21" y="-7169.9033"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="664" label="664">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794074"/>
<attvalue for="betweenesscentrality" value="0.002091820152071207"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="7.413001268745703E-4"/>
</attvalues>
<viz:size value="71.41316"/>
<viz:position x="14801.123" y="-6934.604"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="665" label="665">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001154221373450469"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="4.6770654271633517E-4"/>
</attvalues>
<viz:size value="58.641617"/>
<viz:position x="13634.639" y="-6142.962"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="666" label="666">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="9.400914738370994E-4"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="4.132870053287264E-4"/>
</attvalues>
<viz:size value="56.101273"/>
<viz:position x="12052.276" y="-6167.775"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="667" label="667">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0017635498038575297"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="6.435772557873973E-4"/>
</attvalues>
<viz:size value="66.85139"/>
<viz:position x="14441.11" y="-7080.3696"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="668" label="668">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="4.650952499978815E-4"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.4745911335686327E-4"/>
</attvalues>
<viz:size value="57.696453"/>
<viz:position x="-16623.332" y="627.94995"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="669" label="669">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0013762569812051663"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.747721034300953E-4"/>
</attvalues>
<viz:size value="63.63952"/>
<viz:position x="-16819.58" y="717.3959"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="670" label="670">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0013762569812051663"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.747721034300953E-4"/>
</attvalues>
<viz:size value="63.63952"/>
<viz:position x="-16769.066" y="620.94275"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="671" label="671">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0013762569812051663"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.747721034300953E-4"/>
</attvalues>
<viz:size value="63.63952"/>
<viz:position x="-16666.895" y="754.64966"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="672" label="672">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="0.001057136337862428"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="3.7319454585237847E-4"/>
</attvalues>
<viz:size value="54.22973"/>
<viz:position x="-1389.0408" y="-11104.692"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="673" label="673">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="0.0013488989815116689"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.7341163270173424E-4"/>
</attvalues>
<viz:size value="58.907936"/>
<viz:position x="3007.9172" y="2974.0015"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="674" label="674">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="0.0011242033141837782"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="3.622347211712012E-4"/>
</attvalues>
<viz:size value="53.718117"/>
<viz:position x="1109.5515" y="-108.75184"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="675" label="675">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="0.0012002038466541226"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="3.7398064256452556E-4"/>
</attvalues>
<viz:size value="54.266426"/>
<viz:position x="6110.583" y="-6796.16"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="676" label="676">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="9.87343865507561E-4"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.291819901938439E-4"/>
</attvalues>
<viz:size value="52.17519"/>
<viz:position x="6192.0137" y="5593.5503"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="677" label="677">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="0.0011350519544035382"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="4.005495278238668E-4"/>
</attvalues>
<viz:size value="55.50668"/>
<viz:position x="5032.29" y="1644.5142"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="678" label="678">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="0.0011275424134130447"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="3.56415164355018E-4"/>
</attvalues>
<viz:size value="53.446457"/>
<viz:position x="-1792.4296" y="-2078.2776"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="679" label="679">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="0.0013607516129380969"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="4.7329772977312346E-4"/>
</attvalues>
<viz:size value="58.90262"/>
<viz:position x="-642.1038" y="362.93686"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="680" label="680">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3748932536293766"/>
<attvalue for="harmonicclosnesscentrality" value="0.38906605922552623"/>
<attvalue for="betweenesscentrality" value="9.111617312072891E-4"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.557573547843128E-4"/>
</attvalues>
<viz:size value="58.08382"/>
<viz:position x="547.20917" y="1525.9186"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="681" label="681">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0010509581984222776"/>
<attvalue for="modularity_class" value="10"/>
<attvalue for="pageranks" value="4.456383009645766E-4"/>
</attvalues>
<viz:size value="57.611458"/>
<viz:position x="-9733.489" y="-1492.6722"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="682" label="682">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001966291055054939"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.282077569706571E-4"/>
</attvalues>
<viz:size value="66.13393"/>
<viz:position x="11999.9375" y="2879.8823"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="683" label="683">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0012112715917578366"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.623072457728759E-4"/>
</attvalues>
<viz:size value="58.389572"/>
<viz:position x="12146.329" y="2826.3638"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="684" label="684">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0012299912925651576"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.677767645418772E-4"/>
</attvalues>
<viz:size value="63.31297"/>
<viz:position x="-14161.6" y="-1717.1437"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="685" label="685">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0010992263261056289"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.591148186257072E-4"/>
</attvalues>
<viz:size value="62.908623"/>
<viz:position x="3612.8867" y="7109.099"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="686" label="686">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0019471472551146432"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="6.25576678262428E-4"/>
</attvalues>
<viz:size value="66.01111"/>
<viz:position x="-1432.9056" y="4405.114"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="687" label="687">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0017079455914284188"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="5.476835106702073E-4"/>
</attvalues>
<viz:size value="62.375"/>
<viz:position x="-9367.122" y="-1090.8188"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="688" label="688">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001396486851517621"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.241544598369329E-4"/>
</attvalues>
<viz:size value="61.27665"/>
<viz:position x="7341.9453" y="7171.34"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="689" label="689">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.001504465839867308"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.496080116982377E-4"/>
</attvalues>
<viz:size value="62.464836"/>
<viz:position x="-269.82028" y="-804.7978"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="690" label="690">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.001263686715622349"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.6713748534892046E-4"/>
</attvalues>
<viz:size value="58.61505"/>
<viz:position x="9487.822" y="3603.2236"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="691" label="691">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.001988830659307791"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="6.817274190391451E-4"/>
</attvalues>
<viz:size value="68.63226"/>
<viz:position x="9655.991" y="11275.892"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="692" label="692">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001573168354834774"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.828286496389668E-4"/>
</attvalues>
<viz:size value="64.015594"/>
<viz:position x="422.2877" y="-9583.398"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="693" label="693">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.0018650453329312968"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="7.447720584109165E-4"/>
</attvalues>
<viz:size value="71.57524"/>
<viz:position x="-5593.7246" y="-5968.7466"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="694" label="694">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="0.0010188653874740677"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="3.4370474423426337E-4"/>
</attvalues>
<viz:size value="52.853123"/>
<viz:position x="1181.0099" y="-218.6812"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="695" label="695">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0018143225630506941"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.807419637651927E-4"/>
</attvalues>
<viz:size value="68.586266"/>
<viz:position x="12489.8125" y="2901.3662"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="696" label="696">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794074"/>
<attvalue for="betweenesscentrality" value="0.0024148477354999243"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="7.654483224133881E-4"/>
</attvalues>
<viz:size value="72.54042"/>
<viz:position x="-360.74127" y="-1000.8221"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="697" label="697">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0013631691503324587"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="4.957247159300442E-4"/>
</attvalues>
<viz:size value="59.949524"/>
<viz:position x="10037.416" y="3090.8228"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="698" label="698">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.00142557987523468"/>
<attvalue for="modularity_class" value="17"/>
<attvalue for="pageranks" value="5.995627370111425E-4"/>
</attvalues>
<viz:size value="64.79676"/>
<viz:position x="7546.9985" y="6982.1714"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="699" label="699">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.001959206882544384"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="6.573390845573297E-4"/>
</attvalues>
<viz:size value="67.493805"/>
<viz:position x="-412.91226" y="-898.8919"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="700" label="700">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="105"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37851353681669253"/>
<attvalue for="harmonicclosnesscentrality" value="0.3975702353834605"/>
<attvalue for="betweenesscentrality" value="0.011914329059743958"/>
<attvalue for="modularity_class" value="10"/>
<attvalue for="pageranks" value="0.003024140427889879"/>
</attvalues>
<viz:size value="177.9778"/>
<viz:position x="-24218.75" y="16288.01"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="701" label="701">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0011898922468672657"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="4.460478182276288E-4"/>
</attvalues>
<viz:size value="57.630573"/>
<viz:position x="-5041.355" y="7091.839"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="702" label="702">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3756631867191511"/>
<attvalue for="harmonicclosnesscentrality" value="0.39088838268794074"/>
<attvalue for="betweenesscentrality" value="0.0026788012311560272"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="9.063214020908186E-4"/>
</attvalues>
<viz:size value="79.11647"/>
<viz:position x="12033.607" y="3206.8813"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="703" label="703">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="0.0010375526699106356"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="3.62387009253247E-4"/>
</attvalues>
<viz:size value="53.725224"/>
<viz:position x="5712.485" y="5771.33"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="704" label="704">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37579181646978255"/>
<attvalue for="harmonicclosnesscentrality" value="0.3911921032650098"/>
<attvalue for="betweenesscentrality" value="0.00221389668997978"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="8.074493378105594E-4"/>
</attvalues>
<viz:size value="74.50105"/>
<viz:position x="-2687.23" y="3592.756"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="705" label="705">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="9.942796471856495E-4"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.4995184493425416E-4"/>
</attvalues>
<viz:size value="57.812813"/>
<viz:position x="-8453.194" y="-1439.3049"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="706" label="706">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0015093838781619065"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="5.234461620928385E-4"/>
</attvalues>
<viz:size value="61.243584"/>
<viz:position x="-1544.909" y="-12519.714"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="707" label="707">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0010633958426183733"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.711476178085235E-4"/>
</attvalues>
<viz:size value="58.80225"/>
<viz:position x="-1282.3494" y="-13728.684"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="708" label="708">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="9.789163619620615E-4"/>
<attvalue for="modularity_class" value="17"/>
<attvalue for="pageranks" value="3.752650211672233E-4"/>
</attvalues>
<viz:size value="54.326378"/>
<viz:position x="3090.869" y="12055.73"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="709" label="709">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0021883317684556597"/>
<attvalue for="modularity_class" value="17"/>
<attvalue for="pageranks" value="7.645556292739758E-4"/>
</attvalues>
<viz:size value="72.49875"/>
<viz:position x="3412.5437" y="12970.971"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="710" label="710">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0018086864176282496"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="6.523089976670207E-4"/>
</attvalues>
<viz:size value="67.25899"/>
<viz:position x="-1049.1917" y="4372.3438"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="711" label="711">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="23"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37643628880123475"/>
<attvalue for="harmonicclosnesscentrality" value="0.3927107061503552"/>
<attvalue for="betweenesscentrality" value="0.004408305630816763"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="0.001384187152924079"/>
</attvalues>
<viz:size value="101.4236"/>
<viz:position x="11397.46" y="13742.002"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="712" label="712">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0018086864176282496"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="6.523089976670207E-4"/>
</attvalues>
<viz:size value="67.25899"/>
<viz:position x="-996.8282" y="4308.7397"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="713" label="713">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0010633958426183733"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.711476178085235E-4"/>
</attvalues>
<viz:size value="58.80225"/>
<viz:position x="-1335.921" y="-13832.384"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="714" label="714">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.001126615852385682"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.6655713783569423E-4"/>
</attvalues>
<viz:size value="58.587963"/>
<viz:position x="-1202.8328" y="-14384.357"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="715" label="715">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0010633958426183733"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.711476178085235E-4"/>
</attvalues>
<viz:size value="58.80225"/>
<viz:position x="-1375.0779" y="-13757.682"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="716" label="716">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0015836167604470716"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="6.329649147704883E-4"/>
</attvalues>
<viz:size value="66.355995"/>
<viz:position x="-4973.6836" y="5901.527"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="717" label="717">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0017200803062702619"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="6.553738500988905E-4"/>
</attvalues>
<viz:size value="67.40206"/>
<viz:position x="12467.863" y="3029.2307"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="718" label="718">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.0017789157631245241"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="6.382781152756676E-4"/>
</attvalues>
<viz:size value="66.60402"/>
<viz:position x="6530.2363" y="857.3785"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="719" label="719">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0010639324553200625"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.6010571573100234E-4"/>
</attvalues>
<viz:size value="58.286804"/>
<viz:position x="-10106.511" y="-1717.7604"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="720" label="720">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.37553464499572287"/>
<attvalue for="harmonicclosnesscentrality" value="0.39058466211087167"/>
<attvalue for="betweenesscentrality" value="0.0019443996718123695"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="7.225923056120714E-4"/>
</attvalues>
<viz:size value="70.53987"/>
<viz:position x="9477.695" y="6144.4766"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="721" label="721">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3750213565692807"/>
<attvalue for="harmonicclosnesscentrality" value="0.38936977980259535"/>
<attvalue for="betweenesscentrality" value="9.319277490599358E-4"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="3.2781233229764787E-4"/>
</attvalues>
<viz:size value="52.11125"/>
<viz:position x="6620.6206" y="8263.066"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="722" label="722">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.002275771207560116"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="6.549521738266985E-4"/>
</attvalues>
<viz:size value="67.38238"/>
<viz:position x="5659.7197" y="-6620.2656"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="723" label="723">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="46"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3751495470859682"/>
<attvalue for="harmonicclosnesscentrality" value="0.3896735003796644"/>
<attvalue for="betweenesscentrality" value="0.0012206807434898554"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.194908256929407E-4"/>
</attvalues>
<viz:size value="56.39087"/>
<viz:position x="-9163.63" y="-567.79236"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="724" label="724">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0012986439904614845"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="4.8263228380046586E-4"/>
</attvalues>
<viz:size value="59.338364"/>
<viz:position x="-12180.285" y="-2072.6963"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="725" label="725">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3754061912091671"/>
<attvalue for="harmonicclosnesscentrality" value="0.39028094153380255"/>
<attvalue for="betweenesscentrality" value="0.001263907732091747"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="5.212311199044728E-4"/>
</attvalues>
<viz:size value="61.140182"/>
<viz:position x="-415.32367" y="-684.8034"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="726" label="726">
<attvalues>
<attvalue for="0" value="publication"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="3.0"/>
<attvalue for="closnesscentrality" value="0.3752778252692768"/>
<attvalue for="harmonicclosnesscentrality" value="0.3899772209567335"/>
<attvalue for="betweenesscentrality" value="0.0017197774872984466"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="5.454078449849183E-4"/>
</attvalues>
<viz:size value="62.26877"/>
<viz:position x="6042.0444" y="9080.578"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_0" label="abstract_0">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Zero-shot sketch-based image retrieval (SBIR) is an emerging task in computer vision, allowing to retrieve natural images relevant to sketch queries that might not been seen in the training phase. Existing works either require aligned sketch-image pairs or inefficient memory fusion layer for mapping the visual information to a semantic space. In this work, we propose a semantically aligned paired cycle-consistent generative (SEM-PCYC) model for zero-shot SBIR, where each branch maps the visual information to a common semantic space via an adversarial training. Each of these branches maintains a cycle consistency that only requires supervision at category levels, and avoids the need of highly-priced aligned sketch-image pairs. A classification criteria on the generators' outputs ensures the visual to semantic space mapping to be discriminating. Furthermore, we propose to combine textual and hierarchical side information via a feature selection auto-encoder that selects discriminating side information within a same end-to-end model. Our results demonstrate a significant boost in zero-shot SBIR performance over the state-of-the-art on the challenging Sketchy and TU-Berlin datasets."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="2.5283427784651073E-4"/>
</attvalues>
<viz:size value="48.611225"/>
<viz:position x="1638.0375" y="-485.98935"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_1" label="abstract_1">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Action recognition is a very challenging and important problem in computer vision. Researchers working on this field aspire to provide computers with the abil ity to visually perceive human actions - that is, to observe, interpret, and under stand human-related events that occur in the physical environment merely from visual data. The applications of this technology are numerous: human-machine interaction, e-health, monitoring/surveillance, and content-based video retrieval, among others. Hand-crafted methods dominated the field until the apparition of the first successful deep learning-based action recognition works. Although ear lier deep-based methods underperformed with respect to hand-crafted approaches, these slowly but steadily improved to become state-of-the-art, eventually achieving better results than hand-crafted ones. Still, hand-crafted approaches can be advan tageous in certain scenarios, specially when not enough data is available to train very large deep models or simply to be combined with deep-based methods to fur ther boost the performance. Hence, showing how hand-crafted features can provide extra knowledge the deep networks are notable to easily learn about human actions.This Thesis concurs in time with this change of paradigm and, hence, reflects it into two distinguished parts. In the first part, we focus on improving current suc cessful hand-crafted approaches for action recognition and we do so from three dif ferent perspectives. Using the dense trajectories framework as a backbone: first, we explore the use of multi-modal and multi-view input data to enrich the trajectory de scriptors. Second, we focus on the classification part of action recognition pipelines and propose an ensemble learning approach, where each classifier leams from a different set of local spatiotemporal features to then combine their outputs following an strategy based on the Dempster-Shaffer Theory. And third, we propose a novel hand-crafted feature extraction method that constructs a rnid-level feature descrip tion to better modellong-term spatiotemporal dynarnics within action videos. Moving to the second part of the Thesis, we start with a comprehensive study of the current deep-learning based action recognition methods. We review both fun damental and cutting edge methodologies reported during the last few years and introduce a taxonomy of deep-leaming methods dedicated to action recognition. In particular, we analyze and discuss how these handle the temporal dimension of data. Last but not least, we propose a residual recurrent network for action recogni tion that naturally integrates all our previous findings in a powerful and prornising framework."/>
<attvalue for="2" value="17"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="2.5821154527616786E-4"/>
</attvalues>
<viz:size value="48.86224"/>
<viz:position x="-3504.4226" y="535.4353"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_2" label="abstract_2">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Recently, deep convolutional neural networks (CNNs) have provided outstanding performance in single image super-resolution (SISR). Despite their remarkable performance, the lack of high-frequency information in the recovered images remains a core problem. Moreover, as the networks increase in depth and width, deep CNN-based SR methods are faced with the challenge of computational complexity in practice. A promising and under-explored solution is to adapt the amount of compute based on the different frequency bands of the input. To this end, we present a novel Frequency-based Enhancement Block (FEB) which explicitly enhances the information of high frequencies while forwarding low-frequencies to the output. In particular, this block efficiently decomposes features into low- and high-frequency and assigns more computation to high-frequency ones. Thus, it can help the network generate more discriminative representations by explicitly recovering finer details. Our FEB design is simple and generic and can be used as a direct replacement of commonly used SR blocks with no need to change network architectures. We experimentally show that when replacing SR blocks with FEB we consistently improve the reconstruction error, while reducing the number of parameters in the model. Moreover, we propose a lightweight SR model Frequency-based Enhancement Network (FENet) based on FEB that matches the performance of larger models. Extensive experiments demonstrate that our proposal performs favorably against the state-of-the-art SR algorithms in terms of visual quality, memory footprint, and inference time. The code is available at https://github.com/pbehjatii/FENet"/>
<attvalue for="2" value="13"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.273724903354533"/>
<attvalue for="harmonicclosnesscentrality" value="0.27972665148063897"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="1.4757242386324296E-4"/>
</attvalues>
<viz:size value="43.697525"/>
<viz:position x="-2909.0544" y="-9279.329"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_3" label="abstract_3">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This work addresses the detection of Helicobacter pylori a bacterium classified since 1994 as class 1 carcinogen to humans. By its highest specificity and sensitivity, the preferred diagnosis technique is the analysis of histological images with immunohistochemical staining, a process in which certain stained antibodies bind to antigens of the biological element of interest. This analysis is a time demanding task, which is currently done by an expert pathologist that visually inspects the digitized samples.We propose to use autoencoders to learn latent patterns of healthy tissue and detect H. pylori as an anomaly in image staining. Unlike existing classification approaches, an autoencoder is able to learn patterns in an unsupervised manner (without the need of image annotations) with high performance. In particular, our model has an overall 91% of accuracy with 86\% sensitivity, 96% specificity and 0.97 AUC in the detection of H. pylori."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="1.9067136248978315E-4"/>
</attvalues>
<viz:size value="45.709415"/>
<viz:position x="12125.8" y="-6156.391"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="abstract_4" label="abstract_4">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper presents a multimodal emotion recognition system, which is based on the analysis of audio and visual cues. From the audio channel, Mel-Frequency Cepstral Coefficients, Filter Bank Energies and prosodic features are extracted. For the visual part, two strategies are considered. First, facial landmarks geometric relations, i.e. distances and angles, are computed. Second, we summarize each emotional video into a reduced set of key-frames, which are taught to visually discriminate between the emotions. In order to do so, a convolutional neural network is applied to key-frames summarizing videos. Finally, confidence outputs of all the classifiers from all the modalities are used to define a new feature space to be learned for final emotion label prediction, in a late fusion/stacking fashion. The experiments conducted on the SAVEE, eNTERFACE05, and RML databases show significant performance improvements by our proposed system in comparison to current alternatives, defining the current state-of-the-art in all three databases."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="1.6896305694699215E-4"/>
</attvalues>
<viz:size value="44.696056"/>
<viz:position x="-10941.147" y="-4201.046"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_5" label="abstract_5">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Word spotting can be defined as the pattern recognition tasked aimed at locating and retrieving a specific keyword within a document image collection without explicitly transcribing the whole corpus. Its use is particularly interesting when applied in scenarios where Optical Character Recognition performs poorly or can not be used at all. This thesis focuses on such a scenario, word spotting on historical handwritten documents that have been written by a single author or by multiple authors with a similar calligraphy.This problem requires a visual signature that is robust to image artifacts, flexible to accommodate script variations and efficient to retrieve information in a rapid manner. For this, we have developed a set of word spotting methods that on their foundation use the well known Bag-of-Visual-Words (BoVW) representation. This representation has gained popularity among the document image analysis community to characterize handwritten wordsin an unsupervised manner. However, most approaches on this field rely on a basic BoVW configuration and disregard complex encoding and spatial representations. We determine which BoVW configurations provide the best performance boost to a spotting system.Then, we extend the segmentation-based word spotting, where word candidates are given a priori, to segmentation-free spotting. The proposed approach seeds the document images with overlapping word location candidates and characterizes them with a BoVW signature. Retrieval is achieved comparing the query and candidate signatures and returning the locations that provide a higher consensus. This is a simple but powerful approach that requires a more compact signature than in a segmentation-based scenario. We firstproject the BoVW signature into a reduced semantic topics space and then compress it further using Product Quantizers. The resulting signature only requires a few dozen bytes, allowing us to index thousands of pages on a common desktop computer. The final system still yields a performance comparable to the state-of-the-art despite all the information loss during the compression phases.Afterwards, we also study how to combine different modalities of information in order to create a query-by-X spotting system where, words are indexed using an information modality and queries are retrieved using another. We consider three different information modalities: visual, textual and audio. Our proposal is to create a latent feature space where features which are semantically related are projected onto the same topics. Creating thus a new feature space where information from different modalities can be compared. Later, we consider the codebook generation and descriptor encoding problem. The codebooks used to encode the BoVW signatures are usually created using an unsupervised clustering algorithm and, they require to test multiple parameters to determine which configuration is best for a certain document collection. We propose a semantic clustering algorithm which allows to estimate the best parameter from data. Since gather annotated data is costly, we use synthetically generated word images. The resulting codebook is database agnostic, i. e. a codebook that yields a good performance on document collections that use the same script. We also propose the use of an additional codebook to approximate descriptors and reduce the descriptor encodingcomplexity to sub-linear.Finally, we focus on the problem of signatures dimensionality. 
We propose a new symbol probability signature where each bin represents the probability that a certain symbol is present a certain location of the word image. This signature is extremely compact and combined with compression techniques can represent word images with just a few bytes per signature."/>
<attvalue for="2" value="31"/>
<attvalue for="degree" value="32"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27270468381165364"/>
<attvalue for="harmonicclosnesscentrality" value="0.2780182232346253"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.557573547843128E-4"/>
</attvalues>
<viz:size value="58.08382"/>
<viz:position x="207.6984" y="1188.5304"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="abstract_6" label="abstract_6">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="PURPOSE:Personalized computational simulations of the heart could open up new improved approaches to diagnosis and surgery assistance systems. While it is fully recognized that myocardial fiber orientation is central for the construction of realistic computational models of cardiac electromechanics, the role of its overall architecture and connectivity remains unclear. Morphological studies show that the distribution of cardiac muscular fibers at the basal ring connects epicardium and endocardium. However, computational models simplify their distribution and disregard the basal loop. This work explores the influence in computational simulations of fiber distribution at different short-axis cuts.METHODS:We have used a highly parallelized computational solver to test different fiber models of ventricular muscular connectivity. We have considered two rule-based mathematical models and an own-designed method preserving basal connectivity as observed in experimental data. Simulated cardiac functional scores (rotation, torsion and longitudinal shortening) were compared to experimental healthy ranges using generalized models (rotation) and Mahalanobis distances (shortening, torsion).RESULTS:The probability of rotation was significantly lower for ruled-based models [95% CI (0.13, 0.20)] in comparison with experimental data [95% CI (0.23, 0.31)]. The Mahalanobis distance for experimental data was in the edge of the region enclosing 99% of the healthy population.CONCLUSIONS:Cardiac electromechanical simulations of the heart with fibers extracted from experimental data produce functional scores closer to healthy ranges than rule-based models disregarding architecture connectivity."/>
<attvalue for="2" value="16"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="1.7308364374444017E-4"/>
</attvalues>
<viz:size value="44.88841"/>
<viz:position x="12509.374" y="-5459.3467"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="abstract_7" label="abstract_7">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Background: Bronchoscopy is a safe technique for diagnosing peripheral pulmonary lesions (PPLs), and virtual bronchoscopic navigation (VBN) helps guide the bronchoscope to PPLs. Objectives: We aimed to compare the diagnostic yield of VBN-guided and unguided ultrathin bronchoscopy (UTB) and explore clinical and technical factors associated with better results. We developed a diagnostic algorithm for deciding whether to use VBN to reach PPLs or choose an alternative diagnostic approach. Methods: We compared diagnostic yield between VBN-UTB (prospective cases) and unguided UTB (historical controls) and analyzed the VBN-UTB subgroup to identify clinical and technical variables that could predict the success of VBN-UTB. Results: Fifty-five cases and 110 controls were included. The overall diagnostic yield did not differ between the VBN-guided and unguided arms (47 and 40%, respectively; p = 0.354). Although the yield was slightly higher for PPLs 20 mm in the VBN-UTB arm, the difference was not significant (p = 0.069). No other clinical characteristics were associated with a higher yield in a subgroup analysis, but an 85% diagnostic yield was observed when segmentation was optimal and the PPL was endobronchial (vs. 30% when segmentation was suboptimal and 20% when segmentation was optimal but the PPL was extrabronchial). Conclusions: VBN-guided UTB is not superior to unguided UTB. A greater impact of VBN-guided over unguided UTB is highly dependent on both segmentation quality and an endobronchial location of the PPL. Segmentation quality should be considered before starting a procedure, when an alternative technique that may improve yield can be chosen, saving time and resources."/>
<attvalue for="2" value="15"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="1.6899018041304035E-4"/>
</attvalues>
<viz:size value="44.69732"/>
<viz:position x="14227.394" y="-7721.0728"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="abstract_8" label="abstract_8">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Visual impairment affects the normal course of activities in everyday life including mobility, education, employment, and social interaction. Most of the existing technical solutions devoted to empowering the visually impaired people are in the areas of navigation (obstacle avoidance), access to printed information and object recognition. Less effort has been dedicated so far in developing solutions to support social interactions. In this paper, we introduce a Social-Aware Assistant (SAA) that provides visually impaired people with cues to enhance their face-to-face conversations. The system consists of a perceptive component (represented by smartglasses with an embedded video camera) and a feedback component (represented by a haptic belt). When the vision system detects a head nodding, the belt vibrates, thus suggesting the user to replicate (mirror) the gesture. In our experiments, sighted persons interacted with blind people wearing the SAA. We instructed the former to mirror the noddings according to the vibratory signal, while the latter interacted naturally. After the face-to-face conversation, the participants had an interview to express their experience regarding the use of this new technological assistant. With the data collected during the experiment, we have assessed quantitatively and qualitatively the device usefulness and user satisfaction."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="2.092502669759408E-4"/>
</attvalues>
<viz:size value="46.57669"/>
<viz:position x="5400.2734" y="4517.3335"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_9" label="abstract_9">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Abstract Purpose: Methodology evaluation for decision support systems for health is a time consuming-task. To assess performance of polyp detectionmethods in colonoscopy videos, clinicians have to deal with the annotationof thousands of images. Current existing tools could be improved in terms ofexibility and ease of use. Methods:We introduce GTCreator, a exible annotation tool for providing image and text annotations to image-based datasets.It keeps the main basic functionalities of other similar tools while extendingother capabilities such as allowing multiple annotators to work simultaneouslyon the same task or enhanced dataset browsing and easy annotation transfer aiming to speed up annotation processes in large datasets. Results: Thecomparison with other similar tools shows that GTCreator allows to obtainfast and precise annotation of image datasets, being the only one which offersfull annotation editing and browsing capabilites. Conclusions: Our proposedannotation tool has been proven to be efficient for large image dataset annota-tion, as well as showing potential of use in other stages of method evaluationsuch as experimental setup or results analysis."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2733159008840742"/>
<attvalue for="harmonicclosnesscentrality" value="0.2790432801822335"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="1.553328476504862E-4"/>
</attvalues>
<viz:size value="44.059788"/>
<viz:position x="8979.445" y="-17566.814"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_10" label="abstract_10">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Background and study aims: To evaluate a new computational histology prediction system based on colorectal polyp textural surface patterns using high definition white light images.Patients and methods: Textural elements (textons) were characterized according to their contrast with respect to the surface, shape and number of bifurcations, assuming that dysplastic polyps are associated with highly contrasted, large tubular patterns with some degree of bifurcation. Computer-aided diagnosis (CAD) was compared with pathological diagnosis and the diagnosis by the endoscopists using Kudo and NICE classification.Results: Images of 225 polyps were evaluated (142 dysplastic and 83 non-dysplastic). CAD system correctly classified 205 (91.1%) polyps, 131/142 (92.3%) dysplastic and 74/83 (89.2%) non-dysplastic. For the subgroup of 100 diminutive (&lt;5 mm) polyps, CAD correctly classified 87 (87%) polyps, 43/50 (86%) dysplastic and 44/50 (88%) non-dysplastic. There were not statistically significant differences in polyp histology prediction based on CAD system and on endoscopist assessment.Conclusion: A computer vision system based on the characterization of the polyp surface in the white light accurately predicts colorectal polyp histology."/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27338398306140244"/>
<attvalue for="harmonicclosnesscentrality" value="0.2791571753986344"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="1.5490781843048828E-4"/>
</attvalues>
<viz:size value="44.039948"/>
<viz:position x="9008.174" y="-18847.148"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_11" label="abstract_11">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Many historical manuscripts that hold trustworthy memories of the past societies contain information organized in a structured layout (e.g. census, birth or marriage records). The precious information stored in these documents cannot be effectively used nor accessed without costly annotation efforts. The transcription driven by the semantic categories of words is crucial for the subsequent access. In this paper we describe an approach to extract information from structured historical handwritten text images and build a knowledge representation for the extraction of meaning out of historical data. The method extracts information, such as named entities, without the need of an intermediate transcription step, thanks to the incorporation of context information through language models. Our system has two variants, the first one is based on bigrams, whereas the second one is based on recurrent neural networks. Concretely, our second architecture integrates a Convolutional Neural Network to model visual information from word images together with a Bidirecitonal Long Short Term Memory network to model the relation among the words. This integrated sequential approach is able to extract more information than just the semantic category (e.g. a semantic category can be associated to a person in a record). Our system is generic, it deals with out-of-vocabulary words by design, and it can be applied to structured handwritten texts from different domains. The method has been validated with the ICDAR IEHHR competition protocol, outperforming the existing approaches."/>
<attvalue for="2" value="15"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27345209916531704"/>
<attvalue for="harmonicclosnesscentrality" value="0.2792710706150353"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="1.44937015648789E-4"/>
</attvalues>
<viz:size value="43.5745"/>
<viz:position x="-1041.738" y="-3629.31"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="abstract_12" label="abstract_12">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In this paper, a dual online subspace-based learning method called dual-generalized discriminative common vectors (Dual-GDCV) is presented. The method extends incremental GDCV by exploiting simultaneously both the concepts of incremental and decremental learning for supervised feature extraction and classification. Our methodology is able to update the feature representation space without recalculating the full projection or accessing the previously processed training data. It allows both adding information and removing unnecessary data from a knowledge base in an efficient way, while retaining the previously acquired knowledge. The proposed method has been theoretically proved and empirically validated in six standard face recognition and classification datasets, under two scenarios: (1) removing and adding samples of existent classes, and (2) removing and adding new classes to a classification problem. Results show a considerable computational gain without compromising the accuracy of the model in comparison with both batch methodologies and other state-of-art adaptive methods."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="1.5614495123657057E-4"/>
</attvalues>
<viz:size value="44.0977"/>
<viz:position x="-770.5169" y="-6265.3477"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="abstract_13" label="abstract_13">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Autonomous driving has harsh requirements of small model size and energy efficiency, in order to enable the embedded system to achieve real-time on-board object detection. Recent deep convolutional neural network based object detectors have achieved state-of-the-art accuracy. However, such models are trained with numerous parameters and their high computational costs and large storage prohibit the deployment to memory and computation resource limited systems. Low-precision neural networks are popular techniques for reducing the computation requirements and memory footprint. Among them, binary weight neural network (BWN) is the extreme case which quantizes the float-point into just bit. BWNs are difficult to train and suffer from accuracy deprecation due to the extreme low-bit representation. To address this problem, we propose a knowledge transfer (KT) method to aid the training of BWN using a full-precision teacher network. We built DarkNet-and MobileNet-based binary weight YOLO-v2 detectors and conduct experiments on KITTI benchmark for car, pedestrian and cyclist detection. The experimental results show that the proposed method maintains high detection accuracy while reducing the model size of DarkNet-YOLO from 257 MB to 8.8 MB and MobileNet-YOLO from 193 MB to 7.9 MB."/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="1.9423991443073282E-4"/>
</attvalues>
<viz:size value="45.875996"/>
<viz:position x="4514.3022" y="2039.3698"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_14" label="abstract_14">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The determination of precise skin lesion boundaries in dermoscopic images using automated methods faces many challenges, most importantly, the presence of hair, inconspicuous lesion edges and low contrast in dermoscopic images, and variability in the color, texture and shapes of skin lesions. Existing deep learning-based skin lesion segmentation algorithms are expensive in terms of computational time and memory. Consequently, running such segmentation algorithms requires a powerful GPU and high bandwidth memory, which are not available in dermoscopy devices. Thus, this article aims to achieve precise skin lesion segmentation with minimum resources: a lightweight, efficient generative adversarial network (GAN) model called SLSNet, which combines 1-D kernel factorized networks, position and channel attention, and multiscale aggregation mechanisms with a GAN model. The 1-D kernel factorized network reduces the computational cost of 2D filtering. The position and channel attention modules enhance the discriminative ability between the lesion and non-lesion feature representations in spatial and channel dimensions, respectively. A multiscale block is also used to aggregate the coarse-to-fine features of input skin images and reduce the effect of the artifacts. SLSNet is evaluated on two publicly available datasets: ISBI 2017 and the ISIC 2018. Although SLSNet has only 2.35 million parameters, the experimental results demonstrate that it achieves segmentation results on a par with the state-of-the-art skin lesion segmentation methods with an accuracy of 97.61%, and Dice and Jaccard similarity coefficients of 90.63% and 81.98%, respectively. SLSNet can run at more than 110 frames per second (FPS) in a single GTX1080Ti GPU, which is faster than well-known deep learning-based image segmentation models, such as FCN. Therefore, SLSNet can be used for practical dermoscopic applications."/>
<attvalue for="2" value="16"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27338398306140244"/>
<attvalue for="harmonicclosnesscentrality" value="0.2791571753986344"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.5600135725432802E-4"/>
</attvalues>
<viz:size value="44.090996"/>
<viz:position x="-2710.3267" y="20371.105"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_15" label="abstract_15">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Hand-written Text Recognition techniques with the aim to automatically identify and transcribe hand-written text have been applied to historical sources including ciphers. In this paper, we compare the performance of two machine learning architectures, an unsupervised method based on clustering and a deep learning method with few-shot learning. Both models are tested on seen and unseen data from historical ciphers with different symbol sets consisting of various types of graphic signs. We compare the models and highlight their differences in performance, with their advantages and shortcomings."/>
<attvalue for="2" value="5"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2733159008840742"/>
<attvalue for="harmonicclosnesscentrality" value="0.2790432801822335"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.4996094212211525E-4"/>
</attvalues>
<viz:size value="43.80902"/>
<viz:position x="3434.0107" y="-3881.7456"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_16" label="abstract_16">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Tabular structures in business documents offer a complementary dimension to the raw textual data. For instance, there is information about the relationships among pieces of information. Nowadays, digital mailroom applications have become a key service for workflow automation. Therefore, the detection and interpretation of tables is crucial. With the recent advances in information extraction, table detection and recognition has gained interest in document image analysis, in particular, with the absence of rule lines and unknown information about rows and columns. However, business documents usually contain sensitive contents limiting the amount of public benchmarking datasets. In this paper, we propose a graph-based approach for detecting tables in document images which do not require the raw content of the document. Hence, the sensitive content can be previously removed and, instead of using the raw image or textual content, we propose a purely structural approach to keep sensitive data anonymous. Our framework uses graph neural networks (GNNs) to describe the local repetitive structures that constitute a table. In particular, our main application domain are business documents. We have carefully validated our approach in two invoice datasets and a modern document benchmark. Our experiments demonstrate that tables can be detected by purely structural approaches."/>
<attvalue for="2" value="13"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.5839106053592683E-4"/>
</attvalues>
<viz:size value="44.202545"/>
<viz:position x="2401.168" y="-941.7373"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_17" label="abstract_17">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The most common tool for population-wide COVID-19 identification is the Reverse Transcription-Polymerase Chain Reaction test that detects the presence of the virus in the throat (or sputum) in swab samples. This test has a sensitivity between 59% and 71%. However, this test does not provide precise information regarding the extension of the pulmonary infection. Moreover, it has been proven that through the reading of a computed tomography (CT) scan, a clinician can provide a more complete perspective of the severity of the disease. Therefore, we propose a comprehensive system for fully-automated COVID-19 detection and lesion segmentation from CT scans, powered by deep learning strategies to support decision-making process for the diagnosis of COVID-19."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.9818125819144147E-4"/>
</attvalues>
<viz:size value="46.059982"/>
<viz:position x="36.665077" y="8929.922"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_18" label="abstract_18">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Hand gesture recognition from sequences of depth maps is a challenging computer vision task because of the low inter-class and high intra-class variability, different execution rates of each gesture, and the high articulated nature of human hand. In this paper, a multilevel temporal sampling (MTS) method is first proposed that is based on the motion energy of key-frames of depth sequences. As a result, long, middle, and short sequences are generated that contain the relevant gesture information. The MTS results in increasing the intra-class similarity while raising the inter-class dissimilarities. The weighted depth motion map (WDMM) is then proposed to extract the spatio-temporal information from generated summarized sequences by an accumulated weighted absolute difference of consecutive frames. The histogram of gradient (HOG) and local binary pattern (LBP) are exploited to extract features from WDMM. The obtained results define the current state-of-the-art on three public benchmark datasets of: MSR Gesture 3D, SKIG, and MSR Action 3D, for 3D hand gesture recognition. We also achieve competitive results on NTU action dataset."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2731798382078407"/>
<attvalue for="harmonicclosnesscentrality" value="0.27881548974943166"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.5980396225001939E-4"/>
</attvalues>
<viz:size value="44.2685"/>
<viz:position x="-10373.008" y="-6474.2275"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_19" label="abstract_19">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In this paper, we extend the standard belief propagation (BP) sequential technique proposed in the tree-reweighted sequential method [15] to the fully connected CRF models with the geodesic distance affinity. The proposed method has been applied to the stereo matching problem. Also a new approach to the BP marginal solution is proposed that we call one-view occlusion detection (OVOD). In contrast to the standard winner takes all (WTA) estimation, the proposed OVOD solution allows to find occluded regions in the disparity map and simultaneously improve the matching result. As a result we can perform onlyone energy minimization process and avoid the cost calculation for the second view and the left-right check procedure. We show that the OVOD approach considerably improves results for cost augmentation and energy minimization techniques in comparison with the standard one-view affinity space implementation. We apply our method to the Middlebury data set and reach state-ofthe-art especially for median, average and mean squared error metrics."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="1.669472070653748E-4"/>
</attvalues>
<viz:size value="44.60195"/>
<viz:position x="-716.7712" y="11.616822"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_20" label="abstract_20">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Color induction is the influence of the surrounding color (inducer) on the perceived color of a central region. There are two different types of color induction: color contrast (the color of the central region shifts away from that of the inducer) and color assimilation (the color shifts towards the color of the inducer). Several studies on these effects have used uniform and striped surrounds, reporting color contrast and color assimilation, respectively. Other authors [J. Vis. 12(1), 22 (2012) [CrossRef] ] have studied color induction using flashed uniform surrounds, reporting that the contrast is higher for shorter flash duration. Extending their study, we present new psychophysical results using both flashed and static (i.e., non-flashed) equiluminant stimuli for both striped and uniform surrounds. Similarly to them, for uniform surround stimuli we observed color contrast, but we did not obtain the maximum contrast for the shortest (10 ms) flashed stimuli, but for 40 ms. We only observed this maximum contrast for red, green, and lime inducers, while for a purple inducer we obtained an asymptotic profile along the flash duration. For striped stimuli, we observed color assimilation only for the static (infinite flash duration) redgreen surround inducers (red first inducer, green second inducer). For the other inducers configurations, we observed color contrast or no induction. Since other studies showed that non-equiluminant striped static stimuli induce color assimilation, our results also suggest that luminance differences could be a key factor to induce it."/>
<attvalue for="2" value="15"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="2.0814092471603256E-4"/>
</attvalues>
<viz:size value="46.524906"/>
<viz:position x="4705.038" y="-650.159"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_21" label="abstract_21">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The usage of both off-the-shelf and end-to-end trained deep networks have significantly improved the performance of visual tracking on RGB videos. However, the lack of large labeled datasets hampers the usage of convolutional neural networks for tracking in thermal infrared (TIR) images. Therefore, most state-of-the-art methods on tracking for TIR data are still based on handcrafted features. To address this problem, we propose to use image-to-image translation models. These models allow us to translate the abundantly available labeled RGB data to synthetic TIR data. We explore both the usage of paired and unpaired image translation models for this purpose. These methods provide us with a large labeled dataset of synthetic TIR sequences, on which we can train end-to-end optimal features for tracking. To the best of our knowledge, we are the first to train end-to-end features for TIR tracking. We perform extensive experiments on the VOT-TIR2017 dataset. We show that a network trained on a large dataset of synthetic TIR data obtains better performance than one trained on the available real TIR data. Combining both data sources leads to further improvement. In addition, when we combine the network with motion features, we outperform the state of the art with a relative gain of over 10%, clearly showing the efficiency of using synthetic data to train end-to-end TIR trackers."/>
<attvalue for="2" value="13"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.6197160716276468E-4"/>
</attvalues>
<viz:size value="44.36969"/>
<viz:position x="9320.026" y="9221.235"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_22" label="abstract_22">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="PURPOSE:An intraluminal coronary stent is a metal scaffold deployed in a stenotic artery during percutaneous coronary intervention (PCI). In order to have an effective deployment, a stent should be optimally placed with regard to anatomical structures such as bifurcations and stenoses. Intravascular ultrasound (IVUS) is a catheter-based imaging technique generally used for PCI guiding and assessing the correct placement of the stent. A novel approach that automatically detects the boundaries and the position of the stent along the IVUS pullback is presented. Such a technique aims at optimizing the stent deployment.METHODS:The method requires the identification of the stable frames of the sequence and the reliable detection of stent struts. Using these data, a measure of likelihood for a frame to contain a stent is computed. Then, a robust binary representation of the presence of the stent in the pullback is obtained applying an iterative and multiscale quantization of the signal to symbols using the Symbolic Aggregate approXimation algorithm.RESULTS:The technique was extensively validated on a set of 103 IVUS of sequences of in vivo coronary arteries containing metallic and bioabsorbable stents acquired through an international multicentric collaboration across five clinical centers. The method was able to detect the stent position with an overall F-measure of 86.4%, a Jaccard index score of 75% and a mean distance of 2.5 mm from manually annotated stent boundaries, and in bioabsorbable stents with an overall F-measure of 88.6%, a Jaccard score of 77.7 and a mean distance of 1.5 mm from manually annotated stent boundaries. Additionally, a map indicating the distance between the lumen and the stent along the pullback is created in order to show the angular sectors of the sequence in which the malapposition is present.CONCLUSIONS:Results obtained comparing the automatic results vs the manual annotation of two observers shows that the method approaches the interobserver variability. Similar performances are obtained on both metallic and bioabsorbable stents, showing the flexibility and robustness of the method."/>
<attvalue for="2" value="19"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="2.159029674470621E-4"/>
</attvalues>
<viz:size value="46.887245"/>
<viz:position x="383.00867" y="8319.696"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_23" label="abstract_23">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In this article, a form of the so-called word spotting-method is used on a large set of handwritten documents in order to identify those that contain script of similar execution. The point of departure for the investigation is the mediaeval Swedish manuscript Cod. Holm. D 3. The main scribe of this manuscript has yet not been identified in other documents. The current attempt aims at localising other documents that display a large degree of similarity in the characteristics of the script, these being possible candidates for being executed by the same hand. For this purpose, the method of word spotting has been employed, focusing on individual letters, and therefore the process is referred to as letter spotting in the article. In this process, a set of g:s, h:s and k:s have been selected as templates, and then a search has been made for close matches among the mediaeval Swedish charters. The search resulted in a number of charters that displayed great similarities with the manuscript D 3. The used letter spotting method thus proofed to be a very efficient sorting tool localising similar script samples."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.6419079481470677E-4"/>
</attvalues>
<viz:size value="44.47328"/>
<viz:position x="3954.0352" y="-5718.0396"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_24" label="abstract_24">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Objectives:Spanish and English contrast in adjectivenoun word order: for example, brown dress (English) vs. vestido marrn (dress brown, Spanish). According to the Matrix Language model (MLF) word order in code-switched sentences must be compatible with the word order of the matrix language, but working within the minimalist program (MP), Cantone and MacSwan arrived at the descriptive generalization that the position of the noun phrase relative to the adjective is determined by the adjectives language. Our aim is to evaluate the predictions derived from these two models regarding adjectivenoun order in SpanishEnglish code-switched sentences.Methodology:We contrasted the predictions from both models regarding the acceptability of code-switched sentences with different adjectivenoun orders that were compatible with the MP, the MLF, both, or none. Acceptability was assessed in Experiment 1 with a 5-point Likert and in Experiment 2 with a 2-Alternative Forced Choice (2AFC) task."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="2.602327706964666E-4"/>
</attvalues>
<viz:size value="48.956593"/>
<viz:position x="13.238227" y="-60.65685"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_25" label="abstract_25">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper presents a domain adaptation strategy to efficiently train network architectures for estimating the relative camera pose in multi-view scenarios. The network architectures are fed by a pair of simultaneously acquired images, hence in order to improve the accuracy of the solutions, and due to the lack of large datasets with pairs of overlapped images, a domain adaptation strategy is proposed. The domain adaptation strategy consists on transferring the knowledge learned from synthetic images to real-world scenarios. For this, the networks are firstly trained using pairs of synthetic images, which are captured at the same time by a pair of cameras in a virtual environment; and then, the learned weights of the networks are transferred to the real-world case, where the networks are retrained with a few real images. Different virtual 3D scenarios are generated to evaluate the relationship between the accuracy on the result and the similarity between virtual and real scenariossimilarity on both geometry of the objects contained in the scene as well as relative pose between camera and objects in the scene. Experimental results and comparisons are provided showing that the accuracy of all the evaluated networks for estimating the camera pose improves when the proposed domain adaptation strategy is used, highlighting the importance on the similarity between virtual-real scenarios."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="1.6258461974818416E-4"/>
</attvalues>
<viz:size value="44.398304"/>
<viz:position x="-951.782" y="-14718.731"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="abstract_26" label="abstract_26">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Deep convolutional networks can achieve impressive results on RGB scene recognition thanks to large data sets such as places. In contrast, RGB-D scene recognition is still underdeveloped in comparison, due to two limitations of RGB-D data we address in this paper. The rst limitation is the lack of depth data for training deep learning models. Rather than ne tuning or transferring RGB-specic features, we address this limitation by proposing an architecture and a two-step training approach that directly learns effective depth-specic features using weak supervision via patches. The resulting RGB-D model also benets from more complementary multimodal features. Another limitation is the short range of depth sensors (typically 0.5 m to 5.5 m), resulting in depth images not capturing distant objects in the scenes that RGB images can. We show that this limitation can be addressed by using RGB-D videos, where more comprehensive depth information is accumulated as the camera travels across the scenes. Focusing on this scenario, we introduce the ISIA RGB-D video data set to evaluate RGB-D scene recognition with videos. Our video recognition architecture combines convolutional and recurrent neural networks that are trained in three steps with increasingly complex data to learn effective features (i.e., patches, frames, and sequences). Our approach obtains the state-of-the-art performances on RGB-D image (NYUD2 and SUN RGB-D) and video (ISIA RGB-D) scene recognition."/>
<attvalue for="2" value="15"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="1.856421460401277E-4"/>
</attvalues>
<viz:size value="45.474648"/>
<viz:position x="8371.218" y="9865.666"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_27" label="abstract_27">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In this paper, we present a sparse-based denoising algorithm for scanned documents. This method can be applied to any kind of scanned documents with satisfactory results. Unlike other approaches, the proposed approach encodes noise documents through sparse representation and visual dictionary learning techniques without any prior noise model. Moreover, we propose a precision parameter estimator. Experiments on several datasets demonstrate the robustness of the proposed approach compared to the state-of-the-art methods on document denoising."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="1.5321125533119444E-4"/>
</attvalues>
<viz:size value="43.96075"/>
<viz:position x="-1294.6185" y="1371.7455"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_28" label="abstract_28">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Rainfall is a problem in automated traffic surveillance. Rain streaks occlude the road users and degrade the overall visibility which in turn decrease object detection performance. One way of alleviating this is by artificially removing the rain from the images. This requires knowledge of corresponding rainy and rain-free images. Such images are often produced by overlaying synthetic rain on top of rain-free images. However, this method fails to incorporate the fact that rain fall in the entire three-dimensional volume of the scene. To overcome this, we introduce training data from the SYNTHIA virtual world that models rain streaks in the entirety of a scene. We train a conditional Generative Adversarial Network for rain removal and apply it on traffic surveillance images from SYNTHIA and the AAU RainSnow datasets. To measure the applicability of the rain-removed images in a traffic surveillance context, we run the YOLOv2 object detection algorithm on the original and rain-removed frames. The results on SYNTHIA show an 8% increase in detection accuracy compared to the original rain image. Interestingly, we find that high PSNR or SSIM scores do not imply good object detection performance."/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="1.6340807856228914E-4"/>
</attvalues>
<viz:size value="44.436745"/>
<viz:position x="3255.4668" y="1562.6814"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_30" label="abstract_30">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Fine-grained recognition, i.e. identifying similar subcategories of the same superclass, is central to human activity. Recognizing a friend, finding bacteria in microscopic imagery, or discovering a new kind of galaxy, are just but few examples. However, fine-grained image recognition is still a challenging computer vision task since the differences between two images of the same category can overwhelm the differences between two images of different fine-grained categories. In this regime, where the difference between two categories resides on subtle input changes, excessively invariant CNNs discard those details that help to discriminate between categories and focus on more obvious changes, yielding poor classification performance.On the other hand, CNNs with too much capacity tend to memorize instance-specific details, thus causing overfitting. In this thesis,motivated by thepotential impact of automatic fine-grained image recognition, we tackle the previous challenges and demonstrate that proper alignment of the inputs, multiple levels of attention, regularization, and explicitmodeling of the output space, results inmore accurate fine-grained recognitionmodels, that generalize better, and are more robust to intra-class variation. Concretely, we study the different stages of the neural network pipeline: input pre-processing, attention to regions, feature activations, and the label space. In each stage, we address different issues that hinder the recognition performance on various fine-grained tasks, and devise solutions in each chapter: i)We deal with the sensitivity to input alignment on fine-grained human facial motion such as pain. ii) We introduce an attention mechanism to allow CNNs to choose and process in detail the most discriminate regions of the image. iii)We further extend attention mechanisms to act on the network activations,thus allowing them to correct their predictions by looking back at certainregions, at different levels of abstraction. iv) We propose a regularization loss to prevent high-capacity neural networks to memorize instance details by means of almost-identical feature detectors. v)We finally study the advantages of explicitly modeling the output space within the error-correcting framework. As a result, in this thesis we demonstrate that attention and regularization seem promising directions to overcome the problems of fine-grained image recognition, as well as proper treatment of the input and the output space."/>
<attvalue for="2" value="16"/>
<attvalue for="degree" value="17"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="2.154111540756835E-4"/>
</attvalues>
<viz:size value="46.86429"/>
<viz:position x="-2010.823" y="-1900.41"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_31" label="abstract_31">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In this PhD we have approached the human color vision from two different points of view: psychophysics and computational modeling. First, we have evaluated 15 different tone-mapping operators (TMOs). We have conducted two experiments thatconsider two different criteria: the first one evaluates the local relationships among intensity levels and the second one evaluates the global appearance of the tonemapped imagesw.r.t. the physical one (presented side by side). We conclude that the rankings depend on the criterion and they are not correlated. Considering both criteria, the best TMOs are KimKautz (Kim and Kautz, 2008) and Krawczyk (Krawczyk, Myszkowski, and Seidel, 2005). Another conclusion is that a more standardized evaluation criteria is needed to do a fair comparison among TMOs.Secondly, we have conducted several psychophysical experiments to study thecolor induction. We have studied two different properties of the visual stimuli: temporal frequency and luminance spatial distribution. To study the temporal frequency we defined equiluminant stimuli composed by both uniform and striped surrounds and we flashed them varying the flash duration. For uniform surrounds, the results show that color induction depends on both the flash duration and inducers chromaticity. As expected, in all chromatic conditions color contrast was induced. In contrast, for striped surrounds, we expected to induce color assimilation, but we observed color contrast or no induction. Since similar but not equiluminant striped stimuli induce color assimilation, we concluded that luminance differences could be a key factor to induce color assimilation. Thus, in a subsequent study, we have studied the luminance differences effect on color assimilation. We varied the luminance difference between the target region and its inducers and we observed that color assimilation depends on both this difference and the inducers chromaticity. For red-green condition (where the first inducer is red and the second one is green), color assimilation occurs in almost all luminance conditions.Instead, for green-red condition, color assimilation never occurs. Purple-limeand lime-purple chromatic conditions show that luminance difference is a key factor to induce color assimilation. When the target is darker than its surround, color assimilation is stronger in purple-lime, while when the target is brighter, color assimilation is stronger in lime-purple (mirroring effect). Moreover, we evaluated whether color assimilation is due to luminance or brightness differences. Similarly to equiluminance condition, when the stimuli are equibrightness no color assimilation is induced. Our results support the hypothesis that mutual-inhibition plays a major role in color perception, or at least in color induction.Finally, we have defined a new firing rate model of color processing in the V1parvocellular pathway. We have modeled two different layers of this cortical area: layers 4Cb and 2/3. Our model is a recurrent dynamic computational model that considers both excitatory and inhibitory cells and their lateral connections. Moreover, it considers the existent laminar differences and the cells variety. Thus, we have modeled both single- and double-opponent simple cells and complex cells, which are a pool of double-opponent simple cells. A set of sinusoidal drifting gratings have been used to test the architecture. 
In these gratings we have varied several spatial properties such as temporal and spatial frequencies, gratings area and orientation. To reproduce the electrophysiological observations, the architecture has to consider the existence of non-oriented double-opponent cells in layer 4Cb and the lack of lateral connections between single-opponent cells. Moreover, we have tested our lateral connections simulating the center-surround modulation and we have reproduced physiological measurements where for high contrast stimulus, theresult of the lateral connections is inhibitory, while it is facilitatory for low contrast stimulus."/>
<attvalue for="2" value="35"/>
<attvalue for="degree" value="36"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="2.4545057811034055E-4"/>
</attvalues>
<viz:size value="48.26655"/>
<viz:position x="4406.757" y="-557.80225"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_32" label="abstract_32">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In this letter, we adapt the geodesic distance-based recursive filter to the sparse data interpolation problem. The proposed technique is general and can be easily applied to any kind of sparse data. We demonstrate its superiority over other interpolation techniques in three experiments for qualitative and quantitative evaluation. In addition, we compare our method with the popular interpolation algorithm presented in the paper on EpicFlow optical flow, which is intuitively motivated by a similar geodesic distance principle. The comparison shows that our algorithm is more accurate and considerably faster than the EpicFlow interpolation technique."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="1.759822456693468E-4"/>
</attvalues>
<viz:size value="45.023716"/>
<viz:position x="6417.0454" y="7437.7197"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_33" label="abstract_33">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Automatic age estimation from facial images represents an important task in computer vision. This paper analyses the effect of gender, age, ethnic, makeup and expression attributes of faces as sources of bias to improve deep apparent age prediction. Following recent works where it is shown that apparent age labels benefit real age estimation, rather than direct real to real age regression, our main contribution is the integration, in an end-to-end architecture, of face attributes for apparent age prediction with an additional loss for real age regression. Experimental results on the APPA-REAL dataset indicate the proposed network successfully take advantage of the adopted attributes to improve both apparent and real age estimation. Our model outperformed a state-of-the-art architecture proposed to separately address apparent and real age regression. Finally, we present preliminary results and discussion of a proof of concept application using the proposed model to regress the apparent age of an individual based on the gender of an external observer."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="1.5614287602577428E-4"/>
</attvalues>
<viz:size value="44.0976"/>
<viz:position x="-11363.369" y="-4368.749"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_34" label="abstract_34">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Training a Siamese architecture for re-identification with a large number of identities is a challenging task due to the difficulty of finding relevant negative samples efficiently. In this work we present Bag of Negatives (BoN), a method for accelerated and improved training of Siamese networks that scales well on datasets with a very large number of identities. BoN is an efficient and loss-independent method, able to select a bag of high quality negatives, based on a novel online hashing strategy."/>
<attvalue for="2" value="4"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="1.8279651251535594E-4"/>
</attvalues>
<viz:size value="45.341812"/>
<viz:position x="1836.3445" y="4994.5146"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_35" label="abstract_35">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper investigates the role of saliency to improve the classification accuracy of a Convolutional Neural Network (CNN) for the case when scarce training data is available. Our approach consists in adding a saliency branch to an existing CNN architecture which is used to modulate the standard bottom-up visual features from the original image input, acting as an attentional mechanism that guides the feature extraction process. The main aim of the proposed approach is to enable the effective training of a fine-grained recognition model with limited training samples and to improve the performance on the task, thereby alleviating the need to annotate a large dataset. The vast majority of saliency methods are evaluated on their ability to generate saliency maps, and not on their functionality in a complete vision pipeline. Our proposed pipeline allows to evaluate saliency methods for the high-level task of object recognition. We perform extensive experiments on various fine-grained datasets (Flowers, Birds, Cars, and Dogs) under different conditions and show that saliency can considerably improve the networks performance, especially for the case of scarce training data. Furthermore, our experiments show that saliency methods that obtain improved saliency maps (as measured by traditional saliency benchmarks) also translate to saliency methods that yield improved performance gains when applied in an object recognition pipeline."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.6133555604185107E-4"/>
</attvalues>
<viz:size value="44.339996"/>
<viz:position x="7397.7964" y="5718.8086"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_36" label="abstract_36">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper explores the possibilities of image style transfer applied to text maintaining the original transcriptions. Results on different text domains (scene text, machine printed text and handwritten text) and cross-modal results demonstrate that this is feasible, and open different research lines. Furthermore, two architectures for selective style transfer, which meanstransferring style to only desired image pixels, are proposed. Finally, scene text selective style transfer is evaluated as a data augmentation technique to expand scene text detection datasets, resulting in a boost of text detectors performance. Our implementation of the described models is publicly available."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2735202492211838"/>
<attvalue for="harmonicclosnesscentrality" value="0.27938496583143624"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.4464332234712278E-4"/>
</attvalues>
<viz:size value="43.56079"/>
<viz:position x="-6545.394" y="5558.9194"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_37" label="abstract_37">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Self-Supervised learning from multimodal image and text data allows deep neural networks to learn powerful features with no need of human annotated data. Web and Social Media platforms provide a virtually unlimited amount of this multimodal data. In this work we propose to exploit this free available data to learn a multimodal image and text embedding, aiming to leverage the semantic knowledge learnt in the text domain and transfer it to a visual model for semantic image retrieval. We demonstrate that the proposed pipeline can learn from images with associated text without supervision and analyze the semantic structure of the learnt joint image and text embeddingspace. Weperformathoroughanalysisandperformancecomparisonofvedifferentstateof the art text embeddings in three different benchmarks. We show that the embeddings learnt with Web and Social Media data have competitive performances over supervised methods in the text basedimageretrievaltask,andweclearlyoutperformstateoftheartintheMIRFlickrdatasetwhen training in the target data. Further, we demonstrate how semantic multimodal image retrieval can be performed using the learnt embeddings, going beyond classical instance-level retrieval problems. Finally, we present a new dataset, InstaCities1M, composed by Instagram images and their associated texts that can be used for fair comparison of image-text embeddings."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2735202492211838"/>
<attvalue for="harmonicclosnesscentrality" value="0.27938496583143624"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="1.4599508081169604E-4"/>
</attvalues>
<viz:size value="43.623894"/>
<viz:position x="-4951.638" y="3278.6711"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_38" label="abstract_38">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="For many applications the collection of labeled data is expensive laborious. Exploitation of unlabeled data during training is thus a long pursued objective of machine learning. Self-supervised learning addresses this by positing an auxiliary task (different, but related to the supervised task) for which data is abundantly available. In this paper, we show how ranking can be used as a proxy task for some regression problems. As another contribution, we propose an efficient backpropagation technique for Siamese networks which prevents the redundant computation introduced by the multi-branch network architecture. We apply our framework to two regression problems: Image Quality Assessment (IQA) and Crowd Counting. For both we show how to automatically generate ranked image sets from unlabeled data. Our results show that networks trained to regress to the ground truth targets for labeled data and to simultaneously learn to rank unlabeled data obtain significantly better, state-of-the-art results for both IQA and crowd counting. In addition, we show that measuring network uncertainty on the self-supervised proxy task is a good measure of informativeness of unlabeled data. This can be used to drive an algorithm for active learning and we show that this reduces labeling effort by up to 50 percent."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2737931894723712"/>
<attvalue for="harmonicclosnesscentrality" value="0.27984054669703984"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="1.4350541059172158E-4"/>
</attvalues>
<viz:size value="43.507675"/>
<viz:position x="216.14542" y="4613.2266"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="abstract_39" label="abstract_39">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Due to the lack of thermal image datasets, a new dataset has been acquired for proposed a super-resolution approach using a Deep Convolution Neural Network schema. In order to achieve this image enhancement process, a new thermal images dataset is used. Different experiments have been carried out, firstly, the proposed architecture has been trained using only images of the visible spectrum, and later it has been trained with images of the thermal spectrum, the results showed that with the network trained with thermal images, better results are obtained in the process of enhancing the images, maintaining the image details and perspective. The thermal dataset is available at http://www.cidis.espol.edu.ec/es/dataset."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="1.6074931387783972E-4"/>
</attvalues>
<viz:size value="44.31263"/>
<viz:position x="-1909.0776" y="-14571.514"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="abstract_40" label="abstract_40">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="One interesting publicity application for Smart City environments is recognizing brand information contained in urban advertising panels. For such a purpose, a previous stage is to accurately detect and locate the position of these panels in images. This work presents an effective solution to this problem using a Single Shot Detector (SSD) based on a deep neural network architecture that minimizes the number of false detections under multiple variable conditions regarding the panels and the scene. Achieved experimental results using the Intersection over Union (IoU) accuracy metric make this proposal applicable in real complex urban images."/>
<attvalue for="2" value="5"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27345209916531704"/>
<attvalue for="harmonicclosnesscentrality" value="0.2792710706150353"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="1.4945448412200907E-4"/>
</attvalues>
<viz:size value="43.78538"/>
<viz:position x="-2500.0974" y="-10847.244"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="abstract_41" label="abstract_41">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper presents a novel approach for colorizing near infrared (NIR) images. The approach is based on image-to-image translation using a Cycle-Consistent adversarial network for learning the color channels on unpaired dataset. This architecture is able to handle unpaired datasets. The approach uses as generators tailored networks that require less computation times, converge faster and generate high quality samples. The obtained results have been quantitativelyusing standard evaluation metricsand qualitatively evaluated showing considerable improvements with respect to the state of the art"/>
<attvalue for="2" value="5"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="1.8942654489031546E-4"/>
</attvalues>
<viz:size value="45.651306"/>
<viz:position x="-1748.2487" y="-12799.466"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="abstract_42" label="abstract_42">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper proposes a novel approach to estimate the Normalized Difference Vegetation Index (NDVI) just from an RGB image. The NDVI values are obtained by using images from the visible spectral band together with a synthetic near infrared image obtained by a cycled GAN. The cycled GAN network is able to obtain a NIR image from a given gray scale image. It is trained by using unpaired set of gray scale and NIR images by using a U-net architecture and a multiple loss function (gray scale images are obtained from the provided RGB images). Then, the NIR image estimated with the proposed cycle generative adversarial network is used to compute the NDVI index. Experimental results are provided showing the validity of the proposed approach. Additionally, comparisons with previous approaches are also provided."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="1.6317589417199908E-4"/>
</attvalues>
<viz:size value="44.425907"/>
<viz:position x="-1012.6188" y="-13680.634"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="abstract_43" label="abstract_43">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The management of solid waste in large urban environments has become a complex problem due to increasing amount of waste generated every day by citizens and companies. Current Computer Vision and Deep Learning techniques can help in the automatic detection and classification of waste types for further recycling tasks. In this work, we use the TrashNet dataset to train and compare different deep learning architectures for automatic classification of garbage types. In particular, several Convolutional Neural Networks (CNN) architectures were compared: VGG, Inception and ResNet. The best classification results were obtained using a combined Inception-ResNet model that achieved 88.6% of accuracy. These are the best results obtained with the considered dataset."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2735202492211838"/>
<attvalue for="harmonicclosnesscentrality" value="0.27938496583143624"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="1.4155698942128116E-4"/>
</attvalues>
<viz:size value="43.41672"/>
<viz:position x="-2130.5554" y="-7336.576"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="abstract_44" label="abstract_44">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In this study we provide the analysis of eye movement behavior elicited by low-level feature distinctiveness with a dataset of synthetically-generated image patterns. Design of visual stimuli was inspired by the ones used in previous psychophysical experiments, namely in free-viewing and visual searching tasks, to provide a total of 15 types of stimuli, divided according to the task and feature to be analyzed. Our interest is to analyze the influences of low-level feature contrast between a salient region and the rest of distractors, providing fixation localization characteristics and reaction time of landing inside the salient region. Eye-tracking data was collected from 34 participants during the viewing of a 230 images dataset. Results show that saliency is predominantly and distinctively influenced by: 1. feature type, 2. feature contrast, 3. temporality of fixations, 4. task difficulty and 5. center bias. This experimentation proposes a new psychophysical basis for saliency model evaluation using synthetic images."/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27338398306140244"/>
<attvalue for="harmonicclosnesscentrality" value="0.2791571753986344"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="1.5943008065494253E-4"/>
</attvalues>
<viz:size value="44.25105"/>
<viz:position x="7329.276" y="-1937.4258"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_45" label="abstract_45">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Optical Music Recognition (OMR) is the branch of document image analysis that aims to convert images of musical scores into a computer-readable format. Despite decades of research, the recognition of handwritten music scores, concretely the Western notation, is still an open problem, and the few existing works only focus on a specific stage of OMR. In this work, we propose a full Handwritten Music Recognition (HMR) system based on Convolutional Recurrent Neural Networks, data augmentation and transfer learning, that can serve as a baseline for the research community."/>
<attvalue for="2" value="4"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.672624369591331E-4"/>
</attvalues>
<viz:size value="44.61667"/>
<viz:position x="2210.906" y="-1937.7892"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_46" label="abstract_46">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Historical ciphers, a special type of manuscripts, contain encrypted information, important for the interpretation of our history. The first step towards decipherment is to transcribe the images, either manually or by automatic image processing techniques. Despite the improvements in handwritten text recognition (HTR) thanks to deep learning methodologies, the need of labelled data to train is an important limitation. Given that ciphers often use symbol sets across various alphabets and unique symbols without any transcription scheme available, these supervised HTR techniques are not suitable to transcribe ciphers. In this paper we propose an un-supervised method for transcribing encrypted manuscripts based on clustering and label propagation, which has been successfully applied to community detection in networks. We analyze the performance on ciphers with various symbol sets, and discuss the advantages and drawbacks compared to supervised HTR methods."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2731798382078407"/>
<attvalue for="harmonicclosnesscentrality" value="0.27881548974943166"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.6232919181939076E-4"/>
</attvalues>
<viz:size value="44.386383"/>
<viz:position x="4538.669" y="-2102.8782"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_47" label="abstract_47">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Handwritten Text Recognition (HTR) is still a challenging problem because it must deal with two important difficulties: the variability among writing styles, and the scarcity of labelled data. To alleviate such problems, synthetic data generation and data augmentation are typically used to train HTR systems. However, training with such data produces encouraging but still inaccurate transcriptions in real words. In this paper, we propose an unsupervised writer adaptation approach that is able to automatically adjust a generic handwritten word recognizer, fully trained with synthetic fonts, towards a new incoming writer. We have experimentally validated our proposal using five different datasets, covering several challenges (i) the document source: modern and historic samples, which may involve paper degradation problems; (ii) different handwriting styles: single and multiple writer collections; and (iii) language, which involves different character combinations. Across these challenging collections, we show that our system is able to maintain its performance, thus, it provides a practical and generic approach to deal with new document collections without requiring any expensive and tedious manual annotation step."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.5393567255707178E-4"/>
</attvalues>
<viz:size value="43.994568"/>
<viz:position x="1248.6173" y="-744.376"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_48" label="abstract_48">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Siamese approaches address the visual tracking problem by extracting an appearance template from the current frame, which is used to localize the target in the next frame. In general, this template is linearly combined with the accumulated template from the previous frame, resulting in an exponential decay of information over time. While such an approach to updating has led to improved results, its simplicity limits the potential gain likely to be obtained by learning to update. Therefore, we propose to replace the handcrafted update function with a method which learns to update. We use a convolutional neural network, called UpdateNet, which given the initial template, the accumulated template and the template of the current frame aims to estimate the optimal template for the next frame. The UpdateNet is compact and can easily be integrated into existing Siamese trackers. We demonstrate the generality of the proposed approach by applying it to two Siamese trackers, SiamFC and DaSiamRPN. Extensive experiments on VOT2016, VOT2018, LaSOT, and TrackingNet datasets demonstrate that our UpdateNet effectively predicts the new target template, outperforming the standard linear update. On the large-scale TrackingNet dataset, our UpdateNet improves the results of DaSiamRPN with an absolute gain of 3.9% in terms of success score."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.6197160716276468E-4"/>
</attvalues>
<viz:size value="44.36969"/>
<viz:position x="9497.591" y="9217.2295"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_49" label="abstract_49">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Wearable sensors (e.g., lifelogging cameras) represent very useful tools to monitor people's daily habits and lifestyle. Wearable cameras are able to continuously capture different moments of the day of their wearers, their environment, and interactions with objects, people, and places reflecting their personal lifestyle. The food places where people eat, drink, and buy food, such as restaurants, bars, and supermarkets, can directly affect their daily dietary intake and behavior. Consequently, developing an automated monitoring system based on analyzing a person's food habits from daily recorded egocentric photo-streams of the food places can provide valuable means for people to improve their eating habits. This can be done by generating a detailed report of the time spent in specific food places by classifying the captured food place images to different groups. In this paper, we propose a self-attention mechanism with multi-scale atrous convolutional networks to generate discriminative features from image streams to recognize a predetermined set of food place categories. We apply our model on an egocentric food place dataset called EgoFoodPlaces that comprises of 43 392 images captured by 16 individuals using a lifelogging camera. The proposed model achieved an overall classification accuracy of 80% on the EgoFoodPlaces dataset, respectively, outperforming the baseline methods, such as VGG16, ResNet50, and InceptionV3."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2731798382078407"/>
<attvalue for="harmonicclosnesscentrality" value="0.27881548974943166"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.600607053019363E-4"/>
</attvalues>
<viz:size value="44.280487"/>
<viz:position x="-2339.449" y="19620.965"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_50" label="abstract_50">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="We propose an end-to-end tracking framework for fusing the RGB and TIR modalities in RGB-T tracking. Our baseline tracker is DiMP (Discriminative Model Prediction), which employs a carefully designed target prediction network trained end-to-end using a discriminative loss. We analyze the effectiveness of modality fusion in each of the main components in DiMP, i.e. feature extractor, target estimation network, and classifier. We consider several fusion mechanisms acting at different levels of the framework, including pixel-level, feature-level and response-level. Our tracker is trained in an end-to-end manner, enabling the components to learn how to fuse the information from both modalities. As data to train our model, we generate a large-scale RGB-T dataset by considering an annotated RGB tracking dataset (GOT-10k) and synthesizing paired TIR images using an image-to-image translation approach. We perform extensive experiments on VOT-RGBT2019 dataset and RGBT210 dataset, evaluating each type of modality fusing on each model component. The results show that the proposed fusion mechanisms improve the performance of the single modality counterparts. We obtain our best results when fusing at the feature-level on both the IoU-Net and the model predictor, obtaining an EAO score of 0.391 on VOT-RGBT2019 dataset. With this fusion mechanism we achieve the state-of-the-art performance on RGBT210 dataset."/>
<attvalue for="2" value="14"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.6197160716276468E-4"/>
</attvalues>
<viz:size value="44.36969"/>
<viz:position x="9334.672" y="8929.16"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_51" label="abstract_51">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In this work we target the problem of hate speech detection in multimodal publications formed by a text and an image. We gather and annotate a large scale dataset from Twitter, MMHS150K, and propose different models that jointly analyze textual and visual information for hate speech detection, comparing them with unimodal detection. We provide quantitative and qualitative results and analyze the challenges of the proposed task. We find that, even though images are useful for the hate speech detection task, current multimodal models cannot outperform models analyzing only text. We discuss why and open the field and the dataset for further research."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.5857754462995634E-4"/>
</attvalues>
<viz:size value="44.21125"/>
<viz:position x="-4749.179" y="5784.2515"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_52" label="abstract_52">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Metric learning networks are used to compute image embeddings, which are widely used in many applications such as image retrieval and face recognition. In this paper, we propose to use network distillation to efficiently compute image embeddings with small networks. Network distillation has been successfully applied to improve image classification, but has hardly been explored for metric learning. To do so, we propose two new loss functions that model thecommunication of a deep teacher network to a small student network. We evaluate our system in several datasets, including CUB-200-2011, Cars-196, Stanford Online Products and show that embeddings computed using small student networks perform significantly better than those computed using standard networks of similar size. Results on a very compact network (MobileNet-0.25), which can beused on mobile devices, show that the proposed method can greatly improve Recall@1 results from 27.5% to 44.6%. Furthermore, we investigate various aspects of distillation for embeddings, including hint and attention layers, semisupervised learning and cross quality distillation."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.6495868372490294E-4"/>
</attvalues>
<viz:size value="44.50913"/>
<viz:position x="9198.173" y="5832.157"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_53" label="abstract_53">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Se analizan los sistemas de procesamiento automtico que trabajan sobre documentos digitalizados con el objetivo de describir los contenidos. De esta forma contribuyen a facilitar el acceso, permitir la indizacin automtica y hacer accesibles los documentos a los motores de bsqueda. El objetivo de estas tecnologas es poder entrenar modelos computacionales que sean capaces de clasificar, agrupar o realizar bsquedas sobre documentos digitales. As, se describen las tareas de clasificacin, agrupamiento y bsqueda. Cuando utilizamos tecnologas de inteligencia artificial en los sistemas declasificacin esperamos que la herramienta nos devuelva etiquetas semnticas; en sistemas de agrupamiento que nos devuelva documentos agrupados en clusters significativos; y en sistemas de bsqueda esperamos que dada una consulta, nos devuelva una lista ordenada de documentos en funcin de la relevancia. A continuacin se da una visin de conjunto de los mtodos que nos permiten describir los documentos digitales, tanto de manera visual (cul es su apariencia), como a partir de sus contenidos semnticos (de qu hablan). En cuanto a la descripcin visual de documentos se aborda el estado de la cuestin de las representaciones numricas de documentos digitalizadostanto por mtodos clsicos como por mtodos basados en el aprendizaje profundo (deep learning). Respecto de la descripcin semntica de los contenidos se analizan tcnicas como el reconocimiento ptico de caracteres (OCR); el clculo de estadsticas bsicas sobre la aparicin de las diferentes palabras en un texto (bag-of-words model); y los mtodos basados en aprendizaje profundo como el mtodo word2vec, basado en una red neuronal que, dadas unas cuantas palabras de un texto, debe predecir cul ser lasiguiente palabra. Desde el campo de las ingenieras se estn transfiriendo conocimientos que se han integrado en productos o servicios en los mbitos de la archivstica, la biblioteconoma, la documentacin y las plataformas de gran consumo, sin embargo los algoritmos deben ser lo suficientemente eficientes no slo para el reconocimiento y transcripcin literal sino tambin para la capacidad de interpretacin de los contenidos."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="2.5680234821148524E-4"/>
</attvalues>
<viz:size value="48.796455"/>
<viz:position x="-1985.1411" y="4081.1157"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_54" label="abstract_54">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Reading text in images has attracted interest from computer vision researchers formany years. Our technology focuses on the extraction of structured text such as serialnumbers, machine readings, product codes, etc. so that it is able to center its attention just on the relevant textual elements. It is conceived to work in an end-to-end fashion, bypassing any explicit text segmentation stage. In this paper we present two different industrial use cases where we have applied our automatic structured text reading technology. In the first one, we demonstrate an outstanding performance when reading license plates compared to the current state of the art. In the second one, we present results on our solution for reading utility meters. The technology is commercialized by a recently created spin-off company, and both solutions are at different stages of integration with final clients."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.8031774455319057E-4"/>
</attvalues>
<viz:size value="45.2261"/>
<viz:position x="-3745.8904" y="5927.34"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_55" label="abstract_55">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper presents final results of ICDAR 2019 Scene Text Visual Question Answering competition (ST-VQA). ST-VQA introduces an important aspect that is not addressedby any Visual Question Answering system up to date, namely the incorporation of scene text to answer questions asked about an image. The competition introduces a new dataset comprising 23, 038 images annotated with 31, 791 question / answer pairs where the answer is always grounded on text instances present in the image. The images are taken from 7 different public computer vision datasets, covering a wide range of scenarios.The competition was structured in three tasks of increasing difficulty, that require reading the text in a scene and understanding it in the context of the scene, to correctly answer a given question. A novel evaluation metric is presented, which elegantly assesses both key capabilities expected from an optimal model: text recognition and image understanding. A detailed analysis of results from different participants is showcased, which provides insight into the current capabilities of VQA systems that can read. We firmly believe the dataset proposed in this challenge will be an important milestone to consider towards a path of more robust and general models thatcan exploit scene text to achieve holistic image understanding."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2733159008840742"/>
<attvalue for="harmonicclosnesscentrality" value="0.2790432801822335"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.4411122952815152E-4"/>
</attvalues>
<viz:size value="43.535954"/>
<viz:position x="-5184.5684" y="8876.858"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_56" label="abstract_56">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Current visual question answering datasets do not consider the rich semantic information conveyed by text within an image. In this work, we present a new dataset, ST-VQA, that aims to highlight the importance of exploiting highlevel semantic information present in images as textual cues in the Visual Question Answering process. We use this dataset to define a series of tasks of increasing difficulty for which reading the scene text in the context provided by the visual information is necessary to reason and generate an appropriate answer. We propose a new evaluation metric for these tasks to account both for reasoning errors as well as shortcomings of the text recognition module. In addition we put forward a series of baseline methods, which provide further insight to the newly released dataset, and set the scene for further research."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27324785260799206"/>
<attvalue for="harmonicclosnesscentrality" value="0.2789293849658326"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.4366618086637545E-4"/>
</attvalues>
<viz:size value="43.51518"/>
<viz:position x="-4958.556" y="7921.4253"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_57" label="abstract_57">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper presents final results of ICDAR 2019 Scene Text Visual Question Answering competition (ST-VQA). ST-VQA introduces an important aspect that is not addressed by any Visual Question Answering system up to date, namely the incorporation of scene text to answer questions asked about an image. The competition introduces a new dataset comprising 23,038 images annotated with 31,791 question / answer pairs where the answer is always grounded on text instances present in the image. The images are taken from 7 different public computer vision datasets, covering a wide range of scenarios. The competition was structured in three tasks of increasing difficulty, that require reading the text in a scene and understanding it in the context of the scene, to correctly answer a given question. A novel evaluation metric is presented, which elegantly assesses both key capabilities expected from an optimal model: text recognition and image understanding. A detailed analysis of results from different participants is showcased, which provides insight into the current capabilities of VQA systems that can read. We firmly believe the dataset proposed in this challenge will be an important milestone to consider towards a path of more robust and general models that can exploit scene text to achieve holistic image understanding."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2733159008840742"/>
<attvalue for="harmonicclosnesscentrality" value="0.2790432801822335"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.4411122952815152E-4"/>
</attvalues>
<viz:size value="43.535954"/>
<viz:position x="-4974.46" y="8587.563"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_58" label="abstract_58">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Cross-modal retrieval methods have been significantly improved in last years with the use of deep neural networks and large-scale annotated datasets such as ImageNet and Places. However, collecting and annotating such datasets requires a tremendous amount of human effort and, besides, their annotations are limited to discrete sets of popular visual classes that may not be representative of the richer semantics found on large-scale cross-modal retrieval datasets. In this paper, we present a self-supervised cross-modal retrieval framework that leverages as training data the correlations between images and text on the entire set of Wikipedia articles. Our method consists in training a CNN to predict: (1) the semantic context of the article in which an image is more probable to appear as an illustration, and (2) the semantic context of its caption. Our experiments demonstrate that the proposed method is not only capable of learning discriminative visual representations for solving vision tasks like classification, but that the learned representations are better for cross-modal retrieval when compared to supervised pre-training of the network on the ImageNet dataset."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.5271539686260942E-4"/>
</attvalues>
<viz:size value="43.937603"/>
<viz:position x="-4612.959" y="6987.2603"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_59" label="abstract_59">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Current image captioning systems perform at a merely descriptive level, essentially enumerating the objects in the scene and their relations. Humans, on the contrary, interpret images by integrating several sources of prior knowledge of the world. In this work, we aim to take a step closer to producing captions that offer a plausible interpretation of the scene, by integrating such contextual information into the captioning pipeline. For this we focus on the captioning of images used to illustrate news articles. We propose a novel captioning method that is able to leverage contextual information provided by the text of news articles associated with an image. Our model is able to selectively draw information from the article guided by visual cues, and to dynamically extend the output dictionary to out-of-vocabulary named entities that appear in the context source. Furthermore we introduce&quot; GoodNews&quot;, the largest news image captioning dataset in the literature and demonstrate state-of-the-art results."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.534932534222453E-4"/>
</attvalues>
<viz:size value="43.973915"/>
<viz:position x="-4252.8584" y="6974.361"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_60" label="abstract_60">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="We introduce a novel approach for keypoint detection task that combines handcrafted and learned CNN filters within a shallow multi-scale architecture. Handcrafted filters provide anchor structures for learned filters, which localize, score and rank repeatable features. Scale-space representation is used within the network to extract keypoints at different levels. We design a loss function to detect robust features that exist across a range of scales and to maximize the repeatability score. Our Key.Net model is trained on data synthetically created from ImageNet and evaluated on HPatches benchmark. Results show that our approach outperforms state-of-the-art detectors in terms of repeatability, matching performance and complexity."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="2.1189864536387318E-4"/>
</attvalues>
<viz:size value="46.70032"/>
<viz:position x="-484.60214" y="929.9041"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_62" label="abstract_62">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Autonomous driving systems require huge amounts of data to train. Manual annotation of this data is time-consuming and prohibitively expensive since it involves human resources. Therefore, active learning emerged as an alternative to ease this effort and to make data annotation more manageable. In this paper, we introduce a novel active learning approach for object detection in videos by exploiting temporal coherence. Our active learning criterion is based on the estimated number of errors in terms of false positives and false negatives. The detections obtained by the object detector are used to define the nodes of a graph and tracked forward and backward to temporally link the nodes. Minimizing an energy function defined on this graphical model provides estimates of both false positives and false negatives. Additionally, we introduce a synthetic video dataset, called SYNTHIA-AL, specially designed to evaluate active learning for video object detection in road scenes. Finally, we show that our approach outperforms active learning baselines tested on two datasets."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2731798382078407"/>
<attvalue for="harmonicclosnesscentrality" value="0.27881548974943166"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.5882387894318307E-4"/>
</attvalues>
<viz:size value="44.22275"/>
<viz:position x="7131.2847" y="4905.9766"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_63" label="abstract_63">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="We live in a society where the large majority of the population has a camera-equipped smartphone. In addition, hard drives and cloud storage are getting cheaper and cheaper, leading to a tremendous growth in stored personal photos. Unlike photo collections captured by a digital camera, which typically are pre-processed by the user who organizes them into event-related folders, smartphone pictures are automatically stored in the cloud. As a consequence, photo collections captured by a smartphone are highly unstructured and because smartphones are ubiquitous, they present a larger variability compared to pictures captured by a digital camera. To solve the need of organizing large smartphone photo collections automatically, we propose here a new methodology for hierarchical photo organization into topics and topic-related categories. Our approach successfully estimates latent topics in the pictures by applying probabilistic Latent Semantic Analysis, and automatically assigns a name to each topic by relying on a lexical database. Topic-related categories are then estimated by using a set of topic-specific Convolutional Neuronal Networks. To validate our approach, we ensemble and make public a large dataset of more than 8,000 smartphone pictures from 40 persons. Experimental results demonstrate major user satisfaction with respect to state of the art solutions in terms of organization."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.9150689195041056E-4"/>
</attvalues>
<viz:size value="45.748417"/>
<viz:position x="336.7184" y="9380.403"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_64" label="abstract_64">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Food plays an important role in several aspects of our daily life. Several computer vision approaches have been proposed for tackling food analysis problems, but very little effort has been done in developing methodologies that could take profit of the existent correlation between tasks. In this paper, we propose a new multi-task model that is able to simultaneously predict different food-related tasks, e.g. dish, cuisine and food categories. Here, we extend the homoscedastic uncertainty modeling to allow single-label and multi-label classification and propose a regularization term, which jointly weighs the tasks as well as their correlations. Furthermore, we propose a new Multi-Attribute Food dataset and a new metric, Multi-Task Accuracy. We prove that using both our uncertainty-based loss and the class regularization term, we are able to improve the coherence of outputs between different tasks. Moreover, we outperform the use of task-specific models on classical measures like accuracy or ."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.273724903354533"/>
<attvalue for="harmonicclosnesscentrality" value="0.27972665148063897"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.4275007005826832E-4"/>
</attvalues>
<viz:size value="43.472412"/>
<viz:position x="-1583.8992" y="3414.4487"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_65" label="abstract_65">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The mitral valve vortex ring is a promising flow structure for analysis of diastolic function, however, methods for objective extraction of its formation to dissolution are lacking. We present a novel algorithm for objective extraction of the temporal evolution of the mitral valve vortex ring from magnetic resonance 4D flow data and validated the method against visual analysis. The algorithm successfully extracted mitral valve vortex rings during both early- and late-diastolic filling and agreed substantially with visual assessment. Early-diastolic mitral valve vortex ring properties differed between healthy subjects and patients with ischemic heart disease."/>
<attvalue for="2" value="5"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="2.227657456811039E-4"/>
</attvalues>
<viz:size value="47.207603"/>
<viz:position x="7873.8247" y="2730.7305"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_66" label="abstract_66">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Blur detection aims at segmenting the blurred areas of a given image. Recent deep learning-based methods approach this problem by learning an end-to-end mapping between the blurred input and a binary mask representing the localization of its blurred areas. Nevertheless, the effectiveness of such deep models is limited due to the scarcity of datasets annotated in terms of blur segmentation, as blur annotation is labor intensive. In this work, we bypass the need for such annotated datasets for end-to-end learning, and instead rely on object proposals and a model for blur generation in order to produce a dataset of synthetically blurred images. This allows us to perform self-supervised learning over the generated image and ground truth blur mask pairs using CNNs, defining a framework that can be employed in purely self-supervised, weakly supervised or semi-supervised configurations. Interestingly, experimental results of such setups over the largest blur segmentation datasets available show that this approach achieves state of the art results in blur segmentation, even without ever observing any real blurred image."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="1.7651921012550072E-4"/>
</attvalues>
<viz:size value="45.048782"/>
<viz:position x="8215.404" y="-665.05164"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="abstract_67" label="abstract_67">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Recent progress of self-supervised visual representation learning has achieved remarkable success on many challenging computer vision benchmarks. However, whether these techniques can be used for domain adaptation has not been explored. In this work, we propose a generic method for self-supervised domain adaptation, using object recognition and semantic segmentation of urban scenes as use cases. Focusing on simple pretext/auxiliary tasks (e.g. image rotation prediction), we assess different learning strategies to improve domain adaptation effectiveness by self-supervision. Additionally, we propose two complementary strategies to further boost the domain adaptation accuracy on semantic segmentation within our method, consisting of prediction layer alignment and batch normalization calibration. The experimental results show adaptation levels comparable to most studied domain adaptation methods, thus, bringing self-supervision as a new alternative for reaching domain adaptation. The code is available at this link. https://github.com/Jiaolong/self-supervised-da."/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="2.061047282977836E-4"/>
</attvalues>
<viz:size value="46.429855"/>
<viz:position x="4617.5713" y="2052.1624"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_68" label="abstract_68">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Deep video action recognition models have been highly successful in recent years but require large quantities of manually-annotated data, which are expensive and laborious to obtain. In this work, we investigate the generation of synthetic training data for video action recognition, as synthetic data have been successfully used to supervise models for a variety of other computer vision tasks. We propose an interpretable parametric generative model of human action videos that relies on procedural generation, physics models and other components of modern game engines. With this model we generate a diverse, realistic, and physically plausible dataset of human action videos, called PHAV for Procedural Human Action Videos. PHAV contains a total of 39,982 videos, with more than 1000 examples for each of 35 action categories. Our video generation approach is not limited to existing motion capture sequences: 14 of these 35 categories are procedurally-defined synthetic actions. In addition, each video is represented with 6 different data modalities, including RGB, optical flow and pixel-level semantic labels. These modalities are generated almost simultaneously using the Multiple Render Targets feature of modern GPUs. In order to leverage PHAV, we introduce a deep multi-task (i.e. that considers action classes from multiple datasets) representation learning architecture that is able to simultaneously learn from synthetic and real video datasets, even when their action categories differ. Our experiments on the UCF-101 and HMDB-51 benchmarks suggest that combining our large set of synthetic videos with small real-world datasets can boost recognition performance. Our approach also significantly outperforms video representations produced by fine-tuning state-of-the-art unsupervised generative models of videos."/>
<attvalue for="2" value="14"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2731798382078407"/>
<attvalue for="harmonicclosnesscentrality" value="0.27881548974943166"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="1.5706498700728593E-4"/>
</attvalues>
<viz:size value="44.140644"/>
<viz:position x="-475.18613" y="-3317.0076"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_69" label="abstract_69">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This work presents and evaluates a novel compact scene representation based on Stixels that infers geometric and semantic information. Our approach overcomes the previous rather restrictive geometric assumptions for Stixels by introducing a novel depth model to account for non-flat roads and slanted objects. Both semantic and depth cues are used jointly to infer the scene representation in a sound global energy minimization formulation. Furthermore, a novel approximation scheme is introduced in order to significantly reduce the computational complexity of the Stixel algorithm, and then achieve real-time computation capabilities. The idea is to first perform an over-segmentation of the image, discarding the unlikely Stixel cuts, and apply the algorithm only on the remaining Stixel cuts. This work presents a novel over-segmentation strategy based on a fully convolutional network, which outperforms an approach based on using local extrema of the disparity map. We evaluate the proposed methods in terms of semantic and geometric accuracy as well as run-time on four publicly available benchmark datasets. Our approach maintains accuracy on flat road scene datasets while improving substantially on a novel non-flat road dataset."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="1.8985247410095024E-4"/>
</attvalues>
<viz:size value="45.67119"/>
<viz:position x="6156.318" y="2330.799"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_70" label="abstract_70">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Anticipating the intentions of vulnerable road users (VRUs) such as pedestrians and cyclists is critical for performing safe and comfortable driving maneuvers. This is the case for human driving and, thus, should be taken into account by systems providing any level of driving assistance, from advanced driver assistant systems (ADAS) to fully autonomous vehicles (AVs). In this paper, we show how the latest advances on monocular vision-based human pose estimation, i.e. those relying on deep Convolutional Neural Networks (CNNs), enable to recognize the intentions of such VRUs. In the case of cyclists, we assume that they follow traffic rules to indicate future maneuvers with arm signals. In the case of pedestrians, no indications can be assumed. Instead, we hypothesize that the walking pattern of a pedestrian allows to determine if he/she has the intention of crossing the road in the path of the ego-vehicle, so that the ego-vehicle must maneuver accordingly (e.g. slowing down or stopping). In this paper, we show how the same methodology can be used for recognizing pedestrians and cyclists' intentions. For pedestrians, we perform experiments on the JAAD dataset. For cyclists, we did not found an analogous dataset, thus, we created our own one by acquiring and annotating videos which we share with the research community. Overall, the proposed pipeline provides new state-of-the-art results on the intention recognition of VRUs."/>
<attvalue for="2" value="15"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="2.2091781255579758E-4"/>
</attvalues>
<viz:size value="47.12134"/>
<viz:position x="3400.967" y="1975.8066"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_71" label="abstract_71">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Depth estimation provides essential information to perform autonomous driving and driver assistance. A promising line of work consists of introducing additional semantic information about the traffic scene when training CNNs for depth estimation. In practice, this means that the depth data used for CNN training is complemented with images having pixel-wise semantic labels where the same raw training data is associated with both types of ground truth, i.e., depth and semantic labels. The main contribution of this paper is to show that this hard constraint can be circumvented, i.e., that we can train CNNs for depth estimation by leveraging the depth and semantic information coming from heterogeneous datasets. In order to illustrate the benefits of our approach, we combine KITTI depth and Cityscapes semantic segmentation datasets, outperforming state-of-the-art results on monocular depth estimation."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="1.945763838516231E-4"/>
</attvalues>
<viz:size value="45.891705"/>
<viz:position x="5094.3086" y="1994.8317"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_72" label="abstract_72">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This manuscript has been withdrawn by bioRxiv due to upload of an incorrect version of the manuscript by the authors. Therefore, this manuscript should not be cited as reference for this project."/>
<attvalue for="2" value="3"/>
<attvalue for="degree" value="4"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="1.663056926237638E-4"/>
</attvalues>
<viz:size value="44.572006"/>
<viz:position x="12871.258" y="-5586.831"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="abstract_73" label="abstract_73">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Previous work from Wloka et al. (2017) presented the Selective Tuning Attentive Reference model Fixation Controller (STAR-FC), an active vision model for saccade prediction. Although the model is able to efficiently predict saccades during free-viewing, it is well known that stimulus and task instructions can strongly affect eye movement patterns (Yarbus, 1967). These factors are considered in previous Selective Tuning architectures (Tsotsos and Kruijne, 2014)(Tsotsos, Kotseruba and Wloka, 2016)(Rosenfeld, Biparva &amp; Tsotsos 2017), proposing a way to combine bottom-up and top-down contributions to fixation and saccade programming. In particular, task priming has been shown to be crucial to the deployment of eye movements, involving interactions between brain areas related to goal-directed behavior, working and long-term memory in combination with stimulus-driven eye movement neuronal correlates. Initial theories and models of these influences include (Rao, Zelinsky, Hayhoe and Ballard, 2002)(Navalpakkam and Itti, 2005)(Huang and Pashler, 2007) and show distinct ways to process the task requirements in combination with bottom-up attention. In this study we extend the STAR-FC with novel computational definitions of Long-Term Memory, Visual Task Executive and a Task Relevance Map. With these modules we are able to use textual instructions in order to guide the model to attend to specific categories of objects and/or places in the scene. We have designed our memory model by processing a hierarchy of visual features learned from salient object detection datasets. The relationship between the executive task instructions and the memory representations has been specified using a tree of semantic similarities between the learned features and the object category labels. Results reveal that by using this model, the resulting relevance maps and predicted saccades have a higher probability to fall inside the salient regions depending on the distinct task instructions."/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="2.2494663706652408E-4"/>
</attvalues>
<viz:size value="47.30941"/>
<viz:position x="6075.9404" y="-747.72864"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_75" label="abstract_75">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The impressive performance of Convolutional Neural Networks (CNNs) when solving different vision problems is shadowed by their black-box nature and our consequent lack of understanding of the representations they build and how these representations are organized. To help understanding these issues, we propose to describe the activity of individual neurons by their Neuron Feature visualization and quantify their inherent selectivity with two specific properties. We explore selectivity indexes for: an image feature (color); and an image label (class membership). Our contribution is a framework to seek or classify neurons by indexing on these selectivity properties. It helps to find color selective neurons, such as a red-mushroom neuron in layer Conv4 or class selective neurons such as dog-face neurons in layer Conv5 in VGG-M, and establishes a methodology to derive other selectivity properties. Indexing on neuron selectivity can statistically draw how features and classes are represented through layers in a moment when the size of trained nets is growing and automatic tools to index neurons can be helpful."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="2.441150799031565E-4"/>
</attvalues>
<viz:size value="48.20421"/>
<viz:position x="1208.485" y="3412.7668"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_76" label="abstract_76">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Estimation of intrinsic images still remains a challenging task due to weaknesses of ground-truth datasets, which either are too small or present non-realistic issues. On the other hand, end-to-end deep learning architectures start to achieve interesting results that we believe could be improved if important physical hints were not ignored. In this work, we present a twofold framework: (a) a flexible generation of images overcoming some classical dataset problems such as larger size jointly with coherent lighting appearance; and (b) a flexible architecture tying physical properties through intrinsic losses. Our proposal is versatile, presents low computation time, and achieves state-of-the-art results."/>
<attvalue for="2" value="5"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="2.044280766230574E-4"/>
</attvalues>
<viz:size value="46.35159"/>
<viz:position x="1187.647" y="3658.2527"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_77" label="abstract_77">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Personality perception is implicitly biased due to many subjective factors, such as cultural, social, contextual, gender and appearance. Approaches developed for automatic personality perception are not expected to predict the real personality of the target, but the personality external observers attributed to it. Hence, they have to deal with human bias, inherently transferred to the training data. However, bias analysis in personality computing is an almost unexplored area. In this work, we study different possible sources of bias affecting personality perception, including emotions from facial expressions, attractiveness, age, gender, and ethnicity, as well as their influence on prediction ability for apparent personality estimation. To this end, we propose a multi-modal deep neural network that combines raw audio and visual information alongside predictions of attribute-specific models to regress apparent personality. We also analyse spatio-temporal aggregation schemes and the effect of different time intervals on first impressions. We base our study on the ChaLearn First Impressions dataset, consisting of one-person conversational videos. Our model shows state-of-the-art results regressing apparent personality based on the Big-Five model. Furthermore, given the interpretability nature of our network design, we provide an incremental analysis on the impact of each possible source of bias on final network predictions."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.654946078951399E-4"/>
</attvalues>
<viz:size value="44.534145"/>
<viz:position x="-12353.486" y="-2224.499"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_78" label="abstract_78">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="With the consolidation of the new data protection regulation paradigm for each individual within the European Union (EU), major biometric technologies are now confronted with many concerns related to user privacy in biometric deployments. When individual biometrics are disclosed, the sensitive information about his/her personal data such as financial or health are at high risk of being misused or compromised. This issue can be escalated considerably over scenarios of non-cooperative users, such as elderly people residing in care homes, with their inability to interact conveniently and securely with the biometric system. The primary goal of this study is to design a novel database to investigate the problem of automatic people recognition under privacy constraints. To do so, the collected data-set contains the subjects hand and foot traits and excludes the face biometrics of individuals in order to protect their privacy. We carried out extensive simulations using different baseline methods, including deep learning. Simulation results show that, with the spatial features extracted from the subject sequence in both individual hand or foot videos, state-of-the-art deep models provide promising recognition performance."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2736566512903628"/>
<attvalue for="harmonicclosnesscentrality" value="0.27961275626423804"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="1.4660474593136433E-4"/>
</attvalues>
<viz:size value="43.65235"/>
<viz:position x="-8321.771" y="-5731.3193"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_79" label="abstract_79">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Structural Health Monitoring (SHM) has greatly benefited from computer vision. Recently, deep learning approaches are widely used to accurately estimate the state of deterioration of infrastructure. In this work, we focus on the problem of bridge surface structural damage detection, such as delamination and rebar exposure. It is well known that the quality of a deep learning model is highly dependent on the quality of the training dataset. Bridge damage detection, our application domain, has the following main challenges: (i) labeling the damages requires knowledgeable civil engineering professionals, which makes it difficult to collect a large annotated dataset; (ii) the damage area could be very small, whereas the background area is large, which creates an unbalanced training environment; (iii) due to the difficulty to exactly determine the extension of the damage, there is often a variation among different labelers who perform pixel-wise labeling. In this paper, we propose a novel model for bridge structural damage detection to address the first two challenges. This paper follows the idea of an atrous spatial pyramid pooling (ASPP) module that is designed as a novel network for bridge damage detection. Further, we introduce the weight balanced Intersection over Union (IoU) loss function to achieve accurate segmentation on a highly unbalanced small dataset. The experimental results show that (i) the IoU loss function improves the overall performance of damage detection, as compared to cross entropy loss or focal loss, and (ii) the proposed model has a better ability to detect a minority class than other light segmentation networks."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27345209916531704"/>
<attvalue for="harmonicclosnesscentrality" value="0.2792710706150353"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="1.484146580233999E-4"/>
</attvalues>
<viz:size value="43.73684"/>
<viz:position x="-3545.7878" y="-8604.928"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="abstract_80" label="abstract_80">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Structural Health Monitoring (SHM) has benefited from computer vision and more recently, Deep Learning approaches, to accurately estimate the state of deterioration of infrastructure. In our work, we test Fully Convolutional Networks (FCNs) with a dataset of deck areas of bridges for damage segmentation. We create a dataset for delamination and rebar exposure that has been collected from inspection records of bridges in Niigata Prefecture, Japan. The dataset consists of 734 images with three labels per image, which makes it the largest dataset of images of bridge deck damage. This data allows us to estimate the performance of our method based on regions of agreement, which emulates the uncertainty of in-field inspections. We demonstrate the practicality of FCNs to perform automated semantic segmentation of surface damages. Our model achieves a mean accuracy of 89.7% for delamination and 78.4% for rebar exposure, and a weighted F1 score of 81.9%."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27345209916531704"/>
<attvalue for="harmonicclosnesscentrality" value="0.2792710706150353"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="1.4831020672324545E-4"/>
</attvalues>
<viz:size value="43.731964"/>
<viz:position x="-3127.484" y="-8334.456"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="abstract_81" label="abstract_81">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="A new approach for 2D to 3D garment retexturing is proposed based on Gaussian mixture models and thin plate splines (TPS). An automatically segmented garment of an individual is matched to a new source garment and rendered, resulting in augmented images in which the target garment has been retextured using the texture of the source garment. We divide the problem into garment boundary matching based on Gaussian mixture models and then interpolate inner points using surface topology extracted through geodesic paths, which leads to a more realistic result than standard approaches. We evaluated and compared our system quantitatively by root mean square error (RMS) and qualitatively using the mean opinion score (MOS), showing the benefits of the proposed methodology on our gathered dataset."/>
<attvalue for="2" value="5"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2735202492211838"/>
<attvalue for="harmonicclosnesscentrality" value="0.27938496583143624"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="1.494930249979574E-4"/>
</attvalues>
<viz:size value="43.78718"/>
<viz:position x="-5934.6694" y="-3188.809"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_82" label="abstract_82">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This work proposes a fully convolutional network architecture for RGB face image generation from a given input thermal face image to be applied in face recognition scenarios. The proposed method is based on the FusionNet architecture and increases robustness against overfitting using dropout after bridge connections, randomised leaky ReLUs (RReLUs), and orthogonal regularization. Furthermore, we propose to use a decoding block with resize convolution instead of transposed convolution to improve final RGB face image generation. To validate our proposed network architecture, we train a face classifier and compare its face recognition rate on the reconstructed RGB images from the proposed architecture, to those when reconstructing images with the original FusionNet, as well as when using the original RGB images. As a result, we are introducing a new architecture which leads to a more accurate network."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27345209916531704"/>
<attvalue for="harmonicclosnesscentrality" value="0.2792710706150353"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="1.4582136575601982E-4"/>
</attvalues>
<viz:size value="43.615784"/>
<viz:position x="-6881.8315" y="-5560.3696"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_83" label="abstract_83">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Action recognition is a challenging task that plays an important role in many robotic systems, which highly depend on visual input feeds. However, due to privacy concerns, it is important to find a method which can recognise actions without using visual feed. In this paper, we propose a concept for detecting actions while preserving the test subjects privacy. Our proposed method relies only on recording the temporal evolution of light pulses scattered back from the scene.Such data trace to record one action contains a sequence of one-dimensional arrays of voltage values acquired by a single-pixel detector at 1 GHz repetition rate. Information about both the distance to the object and its shape are embedded in the traces. We apply machine learning in the form of recurrent neural networks for data analysis and demonstrate successful action recognition. The experimental results show that our proposed method could achieve on average 96.47% accuracy on the actions walking forward, walking backwards, sitting down, standing up and waving hand, using recurrentneural network."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27338398306140244"/>
<attvalue for="harmonicclosnesscentrality" value="0.2791571753986344"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="18"/>
<attvalue for="pageranks" value="1.467338193248418E-4"/>
</attvalues>
<viz:size value="43.65838"/>
<viz:position x="-8696.852" y="-6487.273"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_84" label="abstract_84">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Super-resolution (SR) has achieved great success due to the development of deep convolutional neural networks (CNNs). However, as the depth and width of the networks increase, CNN-based SR methods have been faced with the challenge of computational complexity in practice. More- over, most SR methods train a dedicated model for each target resolution, losing generality and increasing memory requirements. To address these limitations we introduce OverNet, a deep but lightweight convolutional network to solve SISR at arbitrary scale factors with a single model. We make the following contributions: first, we introduce a lightweight feature extractor that enforces efficient reuse of information through a novel recursive structure of skip and dense connections. Second, to maximize the performance of the feature extractor, we propose a model agnostic reconstruction module that generates accurate high-resolution images from overscaled feature maps obtained from any SR architecture. Third, we introduce a multi-scale loss function to achieve generalization across scales. Experiments show that our proposal outperforms previous state-of-the-art approaches in standard benchmarks, while maintaining relatively low computation and memory requirements."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="1.607467192965729E-4"/>
</attvalues>
<viz:size value="44.31251"/>
<viz:position x="-3692.0166" y="-5789.1836"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_85" label="abstract_85">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The cost of drawing object bounding boxes (ie labeling) for millions of images is prohibitively high. For instance, labeling pedestrians in a regular urban image could take 35 seconds on average. Active learning aims to reduce the cost of labeling by selecting only those images that are informative to improve the detection network accuracy. In this paper, we propose a method to perform active learning of object detectors based on convolutional neural networks. We propose a new image-level scoring process to rank unlabeled images for their automatic selection, which clearly outperforms classical scores. The proposed method can be applied to videos and sets of still images. In the former case, temporal selection rules can complement our scoring process. As a relevant use case, we extensively study the performance of our method on the task of pedestrian detection. Overall, the experiments show that the proposed method performs better than random selection."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.7111859193314638E-4"/>
</attvalues>
<viz:size value="44.796677"/>
<viz:position x="7565.265" y="5981.2905"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_86" label="abstract_86">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Driving requires reacting to a wide variety of complex environment conditions and agent behaviors. Explicitly modeling each possible scenario is unrealistic. In contrast, imitation learning can, in theory, leverage data from large fleets of human-driven cars. Behavior cloning in particular has been successfully used to learn simple visuomotor policies end-to-end, but scaling to the full spectrum of driving behaviors remains an unsolved problem. In this paper, we propose a new benchmark to experimentally investigate the scalability and limitations of behavior cloning. We show that behavior cloning leads to state-of-the-art results, executing complex lateral and longitudinal maneuvers, even in unseen environments, without being explicitly programmed to do so. However, we confirm some limitations of the behavior cloning approach: some well-known limitations (eg, dataset bias and overfitting), new generalization issues (eg, dynamic objects and the lack of a causal modeling), and training instabilities, all requiring further research before behavior cloning can graduate to real-world driving. The code, dataset, benchmark, and agent studied in this paper can be found at github."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="2.040490818427015E-4"/>
</attvalues>
<viz:size value="46.333897"/>
<viz:position x="3711.003" y="1204.0642"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_87" label="abstract_87">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="We present the design and beta tests of a new machine learning challenge called AutoCV (for Automated Computer Vision), which is the first event in a series of challenges we are planning on the theme of Automated Deep Learning. We target applications for which Deep Learning methods have had great success in the past few years, with the aim of pushing the state of the art in fully automated methods to design the architecture of neural networks and train them without any human intervention. The tasks are restricted to multi-label image classification problems, from domains including medical, areal, people, object, and handwriting imaging. Thus the type of images will vary a lot in scales, textures, and structure. Raw data are provided (no features extracted), but all datasets are formatted in a uniform tensor manner (although images may have fixed or variable sizes within a dataset). The participants's code will be blind tested on a challenge platform in a controlled manner, with restrictions on training and test time and memory limitations. The challenge is part of the official selection of IJCNN 2019."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27338398306140244"/>
<attvalue for="harmonicclosnesscentrality" value="0.2791571753986344"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.4136622084162475E-4"/>
</attvalues>
<viz:size value="43.407814"/>
<viz:position x="-15479.364" y="-1755.4675"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_88" label="abstract_88">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In recent years, deep learning-based networks have achieved state-of-the-art performance in medical image segmentation. Among the existing networks, U-Net has been successfully applied on medical image segmentation. In this paper, we propose an extension of U-Net, Bi-directional ConvLSTM U-Net with Densely connected convolutions (BCDU-Net), for medical image segmentation, in which we take full advantages of U-Net, bi-directional ConvLSTM (BConvLSTM) and the mechanism of dense convolutions. Instead of a simple concatenation in the skip connection of U-Net, we employ BConvLSTM to combine the feature maps extracted from the corresponding encoding path and the previous decoding up-convolutional layer in a non-linear way. To strengthen feature propagation and encourage feature reuse, we use densely connected convolutions in the last convolutional layer of the encoding path. Finally, we can accelerate the convergence speed of the proposed network by employing batch normalization (BN). The proposed model is evaluated on three datasets of: retinal blood vessel segmentation, skin lesion segmentation, and lung nodule segmentation, achieving state-of-the-art performance."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.7828624576545242E-4"/>
</attvalues>
<viz:size value="45.131268"/>
<viz:position x="-10867.572" y="-5101.522"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_89" label="abstract_89">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Maria Ines Torres; Javier Mikel Olaso, Csar Montenegro, Riberto Santana, A. Vzquez, Raquel Justo, J. A. Lozano, Stephan Schlgl, Grard Chollet, Nazim Dugan, M. Irvine, N. Glackin, C. Pickard, Anna Esposito, Gennaro Cordasco, Alda Troncone, Dijana Petrovska-Delacrtaz, Aymen Mtibaa, Mohamed Amine Hmani, M. S. Korsnes, L. J. Martinussen, Sergio Escalera, C. Palmero Cantario, Olivier Deroo, O. Gordeeva, Jofre Tenorio-Laranga, E. Gonzalez-Fraile, Begoa Fernndez-Ruanova, A. Gonzalez-Pinto"/>
<attvalue for="2" value="15"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27338398306140244"/>
<attvalue for="harmonicclosnesscentrality" value="0.2791571753986344"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="12"/>
<attvalue for="pageranks" value="1.6338157164831775E-4"/>
</attvalues>
<viz:size value="44.43551"/>
<viz:position x="-23471.615" y="-6239.227"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_90" label="abstract_90">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="While many individual tasks in the domain of human analysis have recently received an accuracy boost from deep learning approaches, multi-task learning has mostly been ignored due to a lack of data. New synthetic datasets are being released, filling this gap with synthetic generated data. In this work, we analyze four related human analysis tasks in still images in a multi-task scenario by leveraging such datasets. Specifically, we study the correlation of 2D/3D pose estimation, body part segmentation and full-body depth estimation. These tasks are learned via the well-known Stacked Hourglass module such that each of the task-specific streams shares information with the others. The main goal is to analyze how training together these four related tasks can benefit each individual task for a better generalization. Results on the newly released SURREAL dataset show that all four tasks benefit from the multi-task approach, but with different combinations of tasks: while combining all four tasks improves 2D pose estimation the most, 2D pose improves neither 3D pose nor full-body depth estimation. On the other hand 2D parts segmentation can benefit from 2D pose but not from 3D pose. In all cases, as expected, the maximum improvement is achieved on those human body parts that show more variability in terms of spatial distribution, appearance and shape, e.g. wrists and ankles."/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.7456865330037525E-4"/>
</attvalues>
<viz:size value="44.95773"/>
<viz:position x="-10923.159" y="-935.4415"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_91" label="abstract_91">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Dealing with incomplete information is a well studied problem in the context of machine learning and computational intelligence. However, in the context of computer vision, the problem has only been studied in specific scenarios (e.g., certain types of occlusions in specific types of images), although it is common to have incomplete information in visual data. This chapter describes the design of an academic competition focusing on inpainting of images and video sequences that was part of the competition program of WCCI2018 and had a satellite event collocated with ECCV2018. The ChaLearn Looking at People Inpainting Challenge aimed at advancing the state of the art on visual inpainting by promoting the development of methods for recovering missing and occluded information from images and video. Three tracks were proposed in which visual inpainting might be helpful but still challenging: human body pose estimation, text overlays removal and fingerprint denoising. This chapter describes the design of the challenge, which includes the release of three novel datasets, and the description of evaluation metrics, baselines and evaluation protocol. The results of the challenge are analyzed and discussed in detail and conclusions derived from this event are outlined."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27324785260799206"/>
<attvalue for="harmonicclosnesscentrality" value="0.2789293849658326"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.4981015696062118E-4"/>
</attvalues>
<viz:size value="43.801983"/>
<viz:position x="-14669.603" y="14.993994"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_92" label="abstract_92">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This volume presents the results of the Neural Information Processing Systems Competition track at the 2018 NeurIPS conference. The competition follows the same format as the 2017 competition track for NIPS. Out of 21 submitted proposals, eight competition proposals were selected, spanning the area of Robotics, Health, Computer Vision, Natural Language Processing, Systems and Physics. Competitions have become an integral part of advancing state-of-the-art in artificial intelligence (AI). They exhibit one important difference to benchmarks: Competitions test a system end-to-end rather than evaluating only a single component; they assess the practicability of an algorithmic solution in addition to assessing feasibility."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="2.2747896396618878E-4"/>
</attvalues>
<viz:size value="47.42762"/>
<viz:position x="-10190.098" y="1801.9738"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_93" label="abstract_93">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Anti-spoofing attack detection is critical to guarantee the security of face-based authentication and facial analysis systems. Recently, a multi-modal face anti-spoofing dataset, CASIA-SURF, has been released with the goal of boosting research in this important topic. CASIA-SURF is the largest public data set for facial anti-spoofing attack detection in terms of both, diversity and modalities: it comprises 1,000 subjects and 21,000 video samples. We organized a challenge around this novel resource to boost research in the subject. The Chalearn LAP multi-modal face anti-spoofing attack detection challenge attracted more than 300 teams for the development phase with a total of 13 teams qualifying for the final round. This paper presents an overview of the challenge, including its design, evaluation protocol and a summary of results. We analyze the top ranked solutions and draw conclusions derived from the competition. In addition we outline future work directions."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27345209916531704"/>
<attvalue for="harmonicclosnesscentrality" value="0.2792710706150353"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.4342571355465867E-4"/>
</attvalues>
<viz:size value="43.503952"/>
<viz:position x="-18329.855" y="542.62146"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_94" label="abstract_94">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The ChaLearn AutoML Challenge (The authors are in alphabetical order of last name, except the first author who did most of the writing and the second author who produced most of the numerical analyses and plots.) (NIPS 2015 ICML 2016) consisted of six rounds of a machine learning competition of progressive difficulty, subject to limited computational resources. It was followed bya one-round AutoML challenge (PAKDD 2018). The AutoML setting differs from former model selection/hyper-parameter selection challenges, such as the one we previously organized for NIPS 2006: the participants aim to develop fully automated and computationally efficient systems, capable of being trained and tested without human intervention, with code submission. This chapter analyzes the results of these competitions and provides details about the datasets, which were not revealed to the participants. The solutions of the winners are systematically benchmarked over all datasets of all rounds and compared with canonical machine learning algorithms available in scikit-learn. All materials discussed in this chapter (data and code) have been made publicly available at http://automl.chalearn.org/."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.5896823706069543E-4"/>
</attvalues>
<viz:size value="44.22949"/>
<viz:position x="-14747.379" y="-831.94055"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_95" label="abstract_95">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Face anti-spoofing is essential to prevent face recognition systems from a security breach. Much of the progresses have been made by the availability of face anti-spoofing benchmark datasets in recent years. However, existing face anti-spoofing benchmarks have limited number of subjects (170) and modalities (2), which hinder the further development of the academic community. To facilitate face anti-spoofing research, we introduce a large-scale multi-modal dataset, namely CASIA-SURF, which is the largest publicly available dataset for face anti-spoofing in terms of both subjects and visual modalities. Specifically, it consists of 1,000 subjects with 21,000 videos and each sample has 3 modalities (i.e., RGB, Depth and IR). We also provide a measurement set, evaluation protocol and training/validation/testing subsets, developing a new benchmark for face anti-spoofing. Moreover, we present a new multi-modal fusion method as baseline, which performs feature re-weighting to select the more informative channel features while suppressing the less useful ones for each modal. Extensive experiments have been conducted on the proposed dataset to verify its significance and generalization capability. The dataset is available at https://sites.google.com/qq.com/chalearnfacespoofingattackdete/."/>
<attvalue for="2" value="15"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.5141233400321204E-4"/>
</attvalues>
<viz:size value="43.876774"/>
<viz:position x="-17839.219" y="1679.4376"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_96" label="abstract_96">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The flexibility and high-accuracy of Deep Neural Networks (DNNs) has transformed computer vision. But, the fact that we do not know when a specific DNN will work and when it will fail has resulted in a lack of trust. A clear example is self-driving cars; people are uncomfortable sitting in a car driven by algorithms that may fail under some unknown, unpredictable conditions. Interpretability and explainability approaches attempt to address this by uncovering what a DNN models, i.e., what each node (cell) in the network represents and what images are most likely to activate it. This can be used to generate, for example, adversarial attacks. But these approaches do not generally allow us to determine where a DNN will succeed or fail and why. i.e., does this learned representation generalize to unseen samples? Here, we derive a novel approach to define what it means to learn in deep networks, and how to use this knowledge to detect adversarial attacks. We show how this defines the ability of a network to generalize to unseen testing samples and, most importantly, why this is the case."/>
<attvalue for="2" value="13"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.705876879856445E-4"/>
</attvalues>
<viz:size value="44.771893"/>
<viz:position x="-11175.787" y="-4.9329996"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_97" label="abstract_97">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Egocentric activity recognition is one of the most challenging tasks in video analysis. It requires a fine-grained discrimination of small objects and their manipulation. While some methods base on strong supervision and attention mechanisms, they are either annotation consuming or do not take spatio-temporal patterns into account. In this paper we propose LSTA as a mechanism to focus on features from spatial relevant parts while attention is being tracked smoothly across the video sequence. We demonstrate the effectiveness of LSTA on egocentric activity recognition with an end-to-end trainable two-stream architecture, achieving state-of-the-art performance on four standard benchmarks."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.75368278515377E-4"/>
</attvalues>
<viz:size value="44.995056"/>
<viz:position x="-7877.8887" y="-0.8921771"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_98" label="abstract_98">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Text contained in an image carries high-level semantics that can be exploited to achieve richer image understanding. In particular, the mere presence of text provides strong guiding content that should be employed to tackle a diversity of computer vision tasks such as image retrieval, fine-grained classification, and visual question answering. In this paper, we address the problem of fine-grained classification and image retrieval by leveraging textual information along with visual cues to comprehend the existing intrinsic relation between the two modalities. The novelty of the proposed model consists of the usage of a PHOC descriptor to construct a bag of textual words along with a Fisher Vector Encoding that captures the morphology of text. This approach provides a stronger multimodal representation for this task and as our experiments demonstrate, it achieves state-of-the-art results on two different tasks, fine-grained classification and image retrieval."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.525067296218555E-4"/>
</attvalues>
<viz:size value="43.92786"/>
<viz:position x="-4731.5767" y="6447.553"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_99" label="abstract_99">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Chinese scene text reading is one of the most challenging problems in computer vision and has attracted great interest. Different from English text, Chinese has more than 6000 commonly used characters and Chinesecharacters can be arranged in various layouts with numerous fonts. The Chinese signboards in street view are a good choice for Chinese scene text images since they have different backgrounds, fonts and layouts. We organized a competition called ICDAR2019-ReCTS, which mainly focuses on reading Chinese text on signboard. This report presents the final results of the competition. A large-scale dataset of 25,000 annotated signboard images, in which all the text lines and characters are annotated with locations and transcriptions, were released. Four tasks, namely character recognition, text line recognition, text line detection and end-to-end recognition were set up. Besides, considering the Chinese text ambiguity issue, we proposed a multi ground truth (multi-GT) evaluation method to make evaluation fairer. The competition started on March 1, 2019 and ended on April 30, 2019. 262 submissions from 46 teams are received. Most of the participants come from universities, research institutes, and tech companies in China. There are also some participants from the United States, Australia, Singapore, and Korea. 21 teams submit results for Task 1, 23 teams submit results for Task 2, 24 teams submit results for Task 3, and 13 teams submit results for Task 4."/>
<attvalue for="2" value="14"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.654341762289925E-4"/>
</attvalues>
<viz:size value="44.531326"/>
<viz:position x="-7052.163" y="11107.769"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_100" label="abstract_100">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="A key aspect of digital mailroom processes is the extraction of relevant information from administrative documents. More often than not, the extraction process cannot be fully automated, and there is instead an important amount of manual intervention. In this work we study the human process of information extraction from invoice document images. We explore whether the gaze of human annotators during an manual information extraction process could be exploited towards reducing the manual effort and automating the process. To this end, we perform an eye-tracking experiment replicating real-life interfaces for information extraction. Through this pilot study we demonstrate that relevant areas in the document can be identified reliably through automatic fixation classification, and the obtained models generalize well to new subjects. Our findings indicate that it is in principle possible to integrate the human in the document image analysis loop, making use of the scanpath to automate the extraction process or verify extracted information."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="2.0330764900923149E-4"/>
</attvalues>
<viz:size value="46.299286"/>
<viz:position x="-3906.2192" y="5680.1636"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_101" label="abstract_101">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Word spotting has gained increased attention lately as it can be used to extract textual information from handwritten documents and scene-text images. Current word spotting approaches are designed to work on a single language and/or script. Building intelligent models that learn script-independent multilingual word-spotting is challenging due to the large variability of multilingual alphabets and symbols. We used ResNet-152 and the Pyramidal Histogram of Characters (PHOC) embedding to build a one-model script-independent multilingual word-spotting and we tested it on Latin, Arabic, and Bangla (Indian) languages. The one-model we propose performs on par with the multi-model language-specific word-spotting system, and thus, reduces the number of models needed for each script and/or language."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.7507126593084067E-4"/>
</attvalues>
<viz:size value="44.98119"/>
<viz:position x="-5600.6094" y="6746.9126"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_102" label="abstract_102">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The ICDAR 2019 Challenge on &quot;Scanned receipts OCR and key information extraction&quot; (SROIE) covers important aspects related to the automated analysis of scanned receipts. The SROIE tasks play a key role in many document analysis systems and hold significant commercial potential. Although a lot of work has been published over the years on administrative document analysis, the community has advanced relatively slowly, as most datasets have been kept private. One of the key contributions of SROIE to the document analysis community is to offer a first, standardized dataset of 1000 whole scanned receipt images and annotations, as well as an evaluation procedure for such tasks. The Challenge is structured around three tasks, namely Scanned Receipt Text Localization (Task 1), Scanned Receipt OCR (Task 2) and Key Information Extraction from Scanned Receipts (Task 3). The competition opened on 10th February, 2019 and closed on 5th May, 2019. We received 29, 24 and 18 valid submissions received for the three competition tasks, respectively. This report presents the competition datasets, define the tasks and the evaluation protocols, offer detailed submission statistics, as well as an analysis of the submitted performance. While the tasks of text localization and recognition seem to be relatively easy to tackle, it is interesting to observe the variety of ideas and approaches proposed for the information extraction task. According to the submissions' performance we believe there is still margin for improving information extraction performance, although the current dataset would have to grow substantially in following editions. Given the success of the SROIE competition evidenced by the wide interest generated and the healthy number of submissions from academic, research institutes and industry over different countries, we consider that the SROIE competition can evolve into a useful resource for the community, drawing further attention and promoting research and development efforts in this field."/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.6766682729122401E-4"/>
</attvalues>
<viz:size value="44.635548"/>
<viz:position x="-7094.4043" y="10666.792"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_103" label="abstract_103">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Robust text reading from street view images provides valuable information for various applications. Performance improvement of existing methods in such a challenging scenario heavily relies on the amount of fully annotated training data, which is costly and in-efficient to obtain. To scale up the amount of training data while keeping the labeling procedure cost-effective, this competition introduces a new challenge on Large-scale Street View Text with Partial Labeling (LSVT), providing 50, 000 and 400, 000 images in full and weak annotations, respectively. This competition aims to explore the abilities of state-of-the-art methods to detect and recognize text instances from large-scale street view images, closing the gap between research benchmarks and real applications. During the competition period, a total of 41 teams participated in the two proposed tasks with 132 valid submissions, ie, text detection and end-to-end text spotting. This paper includes dataset descriptions, task definitions, evaluation protocols and results summaries of the ICDAR 2019-LSVT challenge."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27338398306140244"/>
<attvalue for="harmonicclosnesscentrality" value="0.2791571753986344"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.6733995222735978E-4"/>
</attvalues>
<viz:size value="44.62029"/>
<viz:position x="-10362.44" y="17786.383"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_104" label="abstract_104">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper reports the ICDAR2019 Robust Reading Challenge on Arbitrary-Shaped Text - RRC-ArT that consists of three major challenges: i) scene text detection, ii) scene text recognition, and iii) scene text spotting. A total of 78 submissions from 46 unique teams/individuals were received for this competition. The top performing score of each challenge is as follows: i) T1 - 82.65%, ii) T2.1 - 74.3%, iii) T2.2 - 85.32%, iv) T3.1 - 53.86%, and v) T3.2 - 54.91%. Apart from the results, this paper also details the ArT dataset, tasks description, evaluation metrics and participants' methods. The dataset, the evaluation kit as well as the results are publicly available at the challenge website."/>
<attvalue for="2" value="15"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27338398306140244"/>
<attvalue for="harmonicclosnesscentrality" value="0.2791571753986344"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.6733995222735978E-4"/>
</attvalues>
<viz:size value="44.62029"/>
<viz:position x="-10691.545" y="17528.268"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_105" label="abstract_105">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="With the growing cosmopolitan culture of modern cities, the need of robust Multi-Lingual scene Text (MLT) detection and recognition systems has never been more immense. With the goal to systematically benchmark and push the state-of-the-art forward, the proposed competition builds on top of the RRC-MLT-2017 with an additional end-to-end task, an additional language in the real images dataset, a large scale multi-lingual synthetic dataset to assist the training, and a baseline End-to-End recognition method. The real dataset consists of 20,000 images containing text from 10 languages. The challenge has 4 tasks covering various aspects of multi-lingual scene text: (a) text detection, (b) cropped word script classification, (c) joint text detection and script classification and (d) end-to-end detection and recognition. In total, the competition received 60 submissions from the research and industrial communities. This paper presents the dataset, the tasks and the findings of the presented RRC-MLT-2019 challenge."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.6024429955479355E-4"/>
</attvalues>
<viz:size value="44.28906"/>
<viz:position x="-6518.9956" y="8226.678"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_106" label="abstract_106">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Class-specific text proposal algorithms can efficiently reduce the search space for possible text object locations in an image. In this paper we combine the Text Proposals algorithm with Fully Convolutional Networks to efficiently reduce the number of proposals while maintaining the same recall level and thus gaining a significant speed up. Our experiments demonstrate that such text proposal approaches yield significantly higher recall rates than state-of-the-art text localization techniques, while also producing better-quality localizations. Our results on the ICDAR 2015 Robust Reading Competition (Challenge 4) and the COCO-text datasets show that, when combined with strong word classifiers, this recall margin leads to state-of-the-art results in end-to-end scene text recognition."/>
<attvalue for="2" value="5"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="1.583779232719768E-4"/>
</attvalues>
<viz:size value="44.201935"/>
<viz:position x="-3406.2593" y="6484.174"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="abstract_107" label="abstract_107">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Sequence-to-sequence models have recently become very popular for tacklinghandwritten word recognition problems. However, how to effectively integrate an external language model into such recognizer is still a challengingproblem. The main challenge faced when training a language model is todeal with the language model corpus which is usually different to the oneused for training the handwritten word recognition system. Thus, the biasbetween both word corpora leads to incorrectness on the transcriptions, providing similar or even worse performances on the recognition task. In thiswork, we introduce Candidate Fusion, a novel way to integrate an externallanguage model to a sequence-to-sequence architecture. Moreover, it provides suggestions from an external language knowledge, as a new input tothe sequence-to-sequence recognizer. Hence, Candidate Fusion provides twoimprovements. On the one hand, the sequence-to-sequence recognizer hasthe flexibility not only to combine the information from itself and the language model, but also to choose the importance of the information providedby the language model. On the other hand, the external language modelhas the ability to adapt itself to the training corpus and even learn themost commonly errors produced from the recognizer. Finally, by conductingcomprehensive experiments, the Candidate Fusion proves to outperform thestate-of-the-art language models for handwritten word recognition tasks."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.5393567255707178E-4"/>
</attvalues>
<viz:size value="43.994568"/>
<viz:position x="1157.8899" y="-819.2414"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_108" label="abstract_108">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Despite decades of research in Optical Music Recognition (OMR), the recognition of old handwritten music scores remains a challenge because of the variabilities in the handwriting styles, paper degradation, lack of standard notation, etc. Therefore, the research in OMR systems adapted to the particularities of old manuscripts is crucial to accelerate the conversion of music scores existing in archives into digital libraries, fostering the dissemination and preservation of our music heritage. In this paper we explore the adaptation of sequence-to-sequence models with attention mechanism (used in translation and handwritten text recognition) and the generation of specific synthetic data for recognizing old music scores. The experimental validation demonstrates that our approach is promising, especially when compared with long short-term memory neural networks."/>
<attvalue for="2" value="5"/>
<attvalue for="degree" value="6"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.755152914149866E-4"/>
</attvalues>
<viz:size value="45.00192"/>
<viz:position x="2295.07" y="-2180.7195"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_109" label="abstract_109">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Approaches that use more than two consecutive video frames in the optical flow estimation have a long research history. However, almost all such methods utilize extra information for a pre-processing flow prediction or for a post-processing flow correction and filtering. In contrast, this paper differs from previously developed techniques. We propose a new algorithm for the likelihood function calculation (alternatively the matching cost volume) that is used in the maximum a posteriori estimation. We exploit the fact that in general, optical flow is locally constant in the sense of time and the likelihood function depends on both the previous and the future frame. Implementation of our idea increases the robustness of optical flow estimation. As a result, our method outperforms 9% over the DCFlow technique, which we use as prototype for our CNN based computation architecture, on the most challenging MPI-Sintel dataset for the non-occluded mask metric. Furthermore, our approach considerably increases the accuracy of the flow estimation for the matching cost processing, consequently outperforming the original DCFlow algorithm results up to 50% in occluded regions and up to 9% in non-occluded regions on the MPI-Sintel dataset. The experimental section shows that the proposed method achieves state-of-the-arts results especially on the MPI-Sintel dataset."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="1.6866532389321355E-4"/>
</attvalues>
<viz:size value="44.682156"/>
<viz:position x="10990.371" y="6638.615"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_110" label="abstract_110">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Variable rate is a requirement for flexible and adaptable image and video compression. However, deep image compression methods (DIC) are optimized for a single fixed rate-distortion (R-D) tradeoff. While this can be addressed by training multiple models for different tradeoffs, the memory requirements increase proportionally to the number of models. Scaling the bottleneck representation of a shared autoencoder can provide variable rate compression with a single shared autoencoder. However, the R-D performance using this simple mechanism degrades in low bitrates, and also shrinks the effective range of bitrates. To address these limitations, we formulate the problem of variable R-D optimization for DIC, and propose modulated autoencoders (MAEs), where the representations of a shared autoencoder are adapted to the specific R-D tradeoff via a modulation network. Jointly training this modulated autoencoder and the modulation network provides an effective way to navigate the R-D operational curve. Our experiments show that the proposed method can achieve almost the same R-D performance of independent models with significantly fewer parameters."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="1.644011710780795E-4"/>
</attvalues>
<viz:size value="44.483105"/>
<viz:position x="10492.012" y="6227.58"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_111" label="abstract_111">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Many historians and linguists are working individually and in an uncoordinated fashion on the identification and decryption of historical ciphers. This is a time-consuming process as they often work without access to automatic methods and processes that can accelerate the decipherment. At the same time, computer scientists and cryptologists are developing algorithms to decrypt various cipher types without having access to a large number of original ciphertexts. In this paper, we describe the DECRYPT project aiming at the creation of resources and tools for historical cryptology by bringing the expertise of various disciplines together for collecting data, exchanging methods for faster progress to transcribe, decrypt and contextualize historical encrypted manuscripts. We present our goals and work-in progress of a general approach for analyzing historical encrypted manuscripts using standardized methods and a new set of state-of-the-art tools. We release the data and tools as open-source hoping that all mentioned disciplines would benefit and contribute to the research infrastructure of historical cryptology."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27324785260799206"/>
<attvalue for="harmonicclosnesscentrality" value="0.2789293849658326"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.666408905628901E-4"/>
</attvalues>
<viz:size value="44.587654"/>
<viz:position x="5537.429" y="-6807.126"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_112" label="abstract_112">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Despite being very successful within the pattern recognition and machine learning community, graph-based methods are often unusable because of the lack of mathematical operations defined in graph domain. Graph embedding, which maps graphs to a vectorial space, has been proposed as a way to tackle these difficulties enabling the use of standard machine learning techniques. However, it is well known that graph embedding functions usually suffer from the loss of structural information. In this paper, we consider the hierarchical structure of a graph as a way to mitigate this loss of information. The hierarchical structure is constructed by topologically clustering the graph nodes and considering each cluster as a node in the upper hierarchical level. Once this hierarchical structure is constructed, we consider several configurations to define the mapping into a vector space given a classical graph embedding, in particular, we propose to make use of the stochastic graphlet embedding (SGE). Broadly speaking, SGE produces a distribution of uniformly sampled low-to-high-order graphlets as a way to embed graphs into the vector space. In what follows, the coarse-to-fine structure of a graph hierarchy and the statistics fetched by the SGE complements each other and includes important structural information with varied contexts. Altogether, these two techniques substantially cope with the usual information loss involved in graph embedding techniques, obtaining a more robust graph representation. This fact has been corroborated through a detailed experimental evaluation on various benchmark graph datasets, where we outperform the state-of-the-art methods."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.702544370156911E-4"/>
</attvalues>
<viz:size value="44.75634"/>
<viz:position x="1344.275" y="-1196.5287"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_113" label="abstract_113">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="During the last years, graph-based representations are experiencing a growing usage in visual recognition and retrieval due to their ability to capture both structural and appearance-based information. Thus, they provide a greater representational power than classical statistical frameworks. However, graph-based representations leads to high computational complexities usually dealt by graph embeddings or approximated matching techniques. Despite their representational power, they are very sensitive to noise and small variations of the input image. With the aim to cope with the time complexity and the variability present in the generated graphs, in this paper we propose to construct a novel hierarchical graph representation. Graph clustering techniques adapted from social media analysis have been used in order to contract a graph at different abstraction levels while keeping information about the topology. Abstract nodes attributes summarise information about the contracted graph partition. For the proposed representations, a coarse-to-fine matching technique is defined. Hence, small graphs are used as a filtering before more accurate matching methods are applied. This approach has been validated in real scenarios such as classification of colour images or retrieval of handwritten words (i.e. word spotting)."/>
<attvalue for="2" value="13"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27338398306140244"/>
<attvalue for="harmonicclosnesscentrality" value="0.2791571753986344"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="1.4953439315292278E-4"/>
</attvalues>
<viz:size value="43.789112"/>
<viz:position x="1576.9396" y="-4946.3257"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_115" label="abstract_115">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The Baix Llobregat (BALL) Demographic Database is an ongoing database project containing individual census data from the Catalan region of Baix Llobregat (Spain) during the nineteenth and twentieth centuries. The BALL Database is built within the project NETWORKS: Technology and citizen innovation for building historical social networks to understand the demographic past directed by Alcia Forns from the Center for Computer Vision and Joana Maria Pujadas-Mora from the Center for Demographic Studies, both at the Universitat Autnoma de Barcelona, funded by the Recercaixa program (20172019).Its webpage is http://dag.cvc.uab.es/xarxes/.The aim of the project is to develop technologies facilitating massive digitalization of demographic sources, and more specifically the padrones (local censuses), in order to reconstruct historical social networks employing computer vision technology. Such virtual networks can be created thanks to the linkage of nominative records compiled in the local censuses across time and space. Thus, digitized versions of individual and family lifespans are established, and individuals and families can be located spatially."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.6532900459771715E-4"/>
</attvalues>
<viz:size value="44.526413"/>
<viz:position x="1439.8317" y="-2117.9902"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_116" label="abstract_116">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Manual transcription of handwritten text is a time consuming task. In the case of encrypted manuscripts, the recognition is even more complex due to the huge variety of alphabets and symbol sets. To speed up and ease this process, we present a web-based tool aimed to (semi)-automatically transcribe the encrypted sources. The user uploads one or several images of the desired encrypted document(s) as input, and the system returns the transcription(s). This process is carried out in an interactive fashion withthe user to obtain more accurate results. For discovering and testing, the developed web tool is freely available."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.7615277775817923E-4"/>
</attvalues>
<viz:size value="45.031677"/>
<viz:position x="3920.2976" y="-2477.7427"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_117" label="abstract_117">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Handwritten marriage licenses books are characterized by a simple structure of the text in the records with an evolutionary vocabulary, mainly composed of proper names that change along the time. This distinct vocabulary makes automatic transcription and semantic information extraction difficult tasks. Previous works have shown that the use of category-based language models and a Grammatical Inference technique known as MGGI can improve the accuracy of thesetasks. However, the application of the MGGI algorithm requires an a priori knowledge to label the words of the training strings, that is not always easy to obtain. In this paper we study how to automatically obtain the information required by the MGGI algorithm using a technique based on Confusion Networks. Using the resulting language model, full handwritten text recognition and information extraction experiments have been carried out with results supporting the proposed approach."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="2.054604120859404E-4"/>
</attvalues>
<viz:size value="46.39978"/>
<viz:position x="2557.6687" y="-1872.5511"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_118" label="abstract_118">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="When transcribing handwritten document images, inaccuracies in the text segmentation step often cause errors in the subsequent transcription step. For this reason, some recent methods propose to perform the recognition at paragraph level. But still, errors in the segmentation of paragraphs can affectthe transcription performance. In this work, we propose an end-to-end framework to transcribe full pages. The joint text detection and transcription allows to remove the layout analysis requirement at test time. The experimental results show that our approach can achieve comparable results to models that assumesegmented paragraphs, and suggest that joining the two tasks brings an improvement over doing the two tasks separately."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.273724903354533"/>
<attvalue for="harmonicclosnesscentrality" value="0.27972665148063897"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.4334378408327355E-4"/>
</attvalues>
<viz:size value="43.50013"/>
<viz:position x="-664.68176" y="-4371.243"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_119" label="abstract_119">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="With the emergence of the touchpad devices and drawing tablets, a new era of sketching started afresh. However, the recognition of sketches is still a tough task due to the variability of the drawing styles. Moreover, in some application scenarios there is few labelled data available for training,which imposes a limitation for deep learning architectures. In addition, in many cases there is a need to generate models able to adapt to new classes. In order to cope with these limitations, we propose a method based on few-shot learning and graph neural networks for classifying sketches aiming for an efficient neural model. We test our approach with several databases ofsketches, showing promising results."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2735202492211838"/>
<attvalue for="harmonicclosnesscentrality" value="0.27938496583143624"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="1.4400852986501598E-4"/>
</attvalues>
<viz:size value="43.53116"/>
<viz:position x="1081.835" y="-5265.418"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="abstract_120" label="abstract_120">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Tabular structures in documents offer a complementary dimension to the raw textual data, representing logical or quantitative relationships among pieces of information. In digital mail room applications, where a large amount ofadministrative documents must be processed with reasonable accuracy, the detection and interpretation of tables is crucial. Table recognition has gained interest in document image analysis, in particular in unconstrained formats (absence of rule lines, unknown information of rows and columns). In this work, we propose a graph-based approach for detecting tables in document images. Instead of using the raw content (recognized text), we make use of the location, context and content type, thus it is purely a structure perception approach, not dependent on the language and the quality of the textreading. Our framework makes use of Graph Neural Networks (GNNs) in order to describe the local repetitive structural information of tables in invoice documents. Our proposed model has been experimentally validated in two invoice datasets and achieved encouraging results. Additionally, due to the scarcityof benchmark datasets for this task, we have contributed to the community a novel dataset derived from the RVL-CDIP invoice data. It will be publicly released to facilitate future research."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.5766415005906138E-4"/>
</attvalues>
<viz:size value="44.168613"/>
<viz:position x="2590.975" y="-772.1125"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_121" label="abstract_121">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Historical handwritten text recognition is an interesting yet challenging problem. In recent times, deep learning based methods have achieved significant performance in handwritten text recognition. However, handwriting recognition using deep learning needs training data, and often, text must be previously segmented into lines (or even words). These limitations constrain the application of HTR techniques in document collections, because training data or segmented words are not always available. Therefore, this paper proposes a training-free and segmentation-free word spotting approach that can be applied in unconstrained scenarios. The proposed word spotting framework is based on document query word expansion and relaxed feature matching algorithm, which can easily be parallelised. Since handwritten words posses distinct shape and characteristics, this work uses a combination of different keypoint detectorsand Fourier-based descriptors to obtain a sufficient degree of relaxed matching. The effectiveness of the proposed method is empirically evaluated on well-known benchmark datasets using standard evaluation measures. The use of informative features along with query expansion significantly contributed in efficient performance of the proposed method."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2731798382078407"/>
<attvalue for="harmonicclosnesscentrality" value="0.27881548974943166"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.607941036106221E-4"/>
</attvalues>
<viz:size value="44.314724"/>
<viz:position x="3020.7156" y="-6164.2354"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_122" label="abstract_122">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Segmentation of airways in Computed Tomography (CT) scans is a must for accurate support of diagnosis and intervention of many pulmonary disorders. In particular, lung cancer diagnosis would benefit from segmentations reaching most distal airways. We present a method that combines descriptors of bronchi local appearance and graph global structural analysis to fine-tune thresholds on the descriptors adapted for each bronchial level. We have compared our method to the top performers of the EXACT09 challenge and to a commercial software for biopsy planning evaluated in an own-collected data-base of high resolution CT scans acquired under different breathing conditions. Results on EXACT09 data show that our method provides a high leakage reduction with minimum loss in airway detection. Results on our data-base show the reliability across varying breathing conditions and a competitive performance for biopsy planning compared to a commercial solution."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="1.6345883836639834E-4"/>
</attvalues>
<viz:size value="44.439114"/>
<viz:position x="13147.753" y="-5919.154"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="abstract_123" label="abstract_123">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Dimensionality reduction is key to alleviate machine learning artifacts in clinical applications with Small Sample Size (SSS) unbalanced datasets. Existing methods rely on either the probabilistic distribution of training data or the discriminant power of the reduced space, disregarding the impact of repeatability and uncertainty in features.In the present study is proposed the use of reproducibility of radiomics features to select features with high inter-class correlation coefficient (ICC). The reproducibility includes the variability introduced in the image acquisition, like medical scans acquisition parameters and convolution kernels, that affects intensity-based features and tumor annotations made by physicians, that influences morphological descriptors of the lesion.For the reproducibility of radiomics features three studies were conducted on cases collected at Vall Hebron Oncology Institute (VHIO) on responders to oncology treatment. The studies focused on the variability due to the convolution kernel, image acquisition parameters, and the inter-observer lesion identification. The features selected were those features with a ICC higher than 0.7 in the three studies.The selected features based on reproducibility were evaluated for lesion malignancy classification using a different database. Results show better performance compared to several state-of-the-art methods including Principal Component Analysis (PCA), Kernel Discriminant Analysis via QR decomposition (KDAQR), LASSO, and an own built Convolutional Neural Network."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="1.6542082743357755E-4"/>
</attvalues>
<viz:size value="44.5307"/>
<viz:position x="13530.416" y="-6617.715"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="abstract_124" label="abstract_124">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="State of the art machine learning methods need huge amounts of data with unambiguous annotations for their training. In the context of medical imaging this is, in general, a very difficult task due to limited access to clinical data, the time required for manual annotations and variability across experts. Simulated data could serve for data augmentation provided that its appearance was comparable to the actual appearance of intra-operative acquisitions. Generative Adversarial Networks (GANs) are a powerful tool for artistic style transfer, but lack a criteria for selecting epochs ensuring also preservation of intra-operative content.We propose a multi-objective optimization strategy for a selection of cycleGAN epochs ensuring a mapping between virtual images and the intra-operative domain preserving anatomical content. Our approach has been applied to simulate intra-operative bronchoscopic videos and chest CT scans from virtual sketches generated using simple graphical primitives."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2731798382078407"/>
<attvalue for="harmonicclosnesscentrality" value="0.27881548974943166"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.543351499449496E-4"/>
</attvalues>
<viz:size value="44.013214"/>
<viz:position x="11894.195" y="-5590.068"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_125" label="abstract_125">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Invited speaker"/>
<attvalue for="2" value="1"/>
<attvalue for="degree" value="2"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="2"/>
<attvalue for="pageranks" value="1.8070489791304937E-4"/>
</attvalues>
<viz:size value="45.24417"/>
<viz:position x="12718.448" y="-5815.282"/>
<viz:color r="211" g="179" b="176"/>
</node>
<node id="abstract_126" label="abstract_126">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In this work, we propose a convolutional neural network based approach to estimate the spectral reflectance of a surface and spectral power distribution of light from a single RGB image of a V-shaped surface. Interreflections happening in a concave surface lead to gradients of RGB values over its area. These gradients carry a lot of information concerning the physical properties of the surface and the illuminant. Our network is trained with only simulated data constructed using a physics-based interreflection model. Coupling interreflection effects with deep learning helps to retrieve the spectral reflectance under an unknown light and to estimate spectral power distribution of this light as well. In addition, it is more robust to the presence of image noise than classical approaches. Our results show that the proposed approach outperforms state-of-the-art learning-based approaches on simulated data. In addition, it gives better results on real data compared to other interreflection-based approaches."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="2.087841707429965E-4"/>
</attvalues>
<viz:size value="46.55493"/>
<viz:position x="6211.957" y="4954.4595"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_127" label="abstract_127">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Recently, image-to-image translation research has witnessed remarkable progress. Although current approaches successfully generate diverse outputs or perform scalable image transfer, these properties have not been combined into a single method. To address this limitation, we propose SDIT: Scalable and Diverse image-to-image translation. These properties are combined into a single generator. The diversity is determined by a latent variable which is randomly sampled from a normal distribution. The scalability is obtained by conditioning the network on the domain attributes. Additionally, we also exploit an attention mechanism that permits the generator to focus on the domain-specific attribute. We empirically demonstrate the performance of the proposed method on face mapping and other datasets beyond faces."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.612683954453188E-4"/>
</attvalues>
<viz:size value="44.336864"/>
<viz:position x="8614.326" y="8660.624"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_128" label="abstract_128">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Images with visual and scene text content are ubiquitous in everyday life. However, current image interpretation systems are mostly limited to using only the visual features, neglecting to leverage the scene text content. In this paper, we propose to jointly use scene text and visual channels for robust semantic interpretation of images. We do not only extract and encode visual and scene text cues, but also model their interplay to generate a contextual joint embedding with richer semantics. The contextual embedding thus generated is applied to retrieval and classification tasks on multimedia images, with scene text content, to demonstrate its effectiveness. In the retrieval framework, we augment our learned text-visual semantic representation with scene text cues, to mitigate vocabulary misses that may have occurred during the semantic embedding. To deal with irrelevant or erroneous recognition of scene text, we also apply query-based attention to our text channel. We show how the multi-channel approach, involving visual semantics and scene text, improves upon state of the art."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.8999487952512008E-4"/>
</attvalues>
<viz:size value="45.677834"/>
<viz:position x="-5329.107" y="6597.319"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_129" label="abstract_129">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Across-domain multitask learning is a challenging area of computer vision and machine learning due to the intra-similarities among class distributions. Addressing this problem to cope with the human cognition system by considering inter and intra-class categorization and recognition complicates the problem even further. We propose in this work an effective holistic and hierarchical learning by using a text embedding layer on top of a deep learning model. We also propose a novel sensory discriminator approach to resolve the collisions between different tasks and domains. We then train the model concurrently on textual sentiment analysis, speech recognition, image classification, action recognition from video, and handwriting word spotting of two different scripts (Arabic and English). The model we propose successfully learned different tasks across multiple domains."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.9343610448396243E-4"/>
</attvalues>
<viz:size value="45.838474"/>
<viz:position x="-5586.6035" y="6592.956"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_130" label="abstract_130">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Image classification is widely researched in the literature, where models based on Convolutional Neural Networks (CNNs) have provided better results. When data is not enough, CNN models tend to be overfitted. To deal with this, often, traditional techniques of data augmentation are applied, such as: affine transformations, adjusting the color balance, among others. However, we argue that some techniques of data augmentation may be more appropriate for some of the classes. In order to select the techniques that work best for particular class, we propose to explore the epistemic uncertainty for the samples within each class. From our experiments, we can observe that when the data augmentation is applied class-conditionally, we improve the results in terms of accuracy and also reduce the overall epistemic uncertainty. To summarize, in this paper we propose a class-conditional data augmentation procedure that allows us to obtain better results and improve robustness of the classification in the face of model uncertainty."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27338398306140244"/>
<attvalue for="harmonicclosnesscentrality" value="0.2791571753986344"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.4397390152042134E-4"/>
</attvalues>
<viz:size value="43.529545"/>
<viz:position x="-1110.9933" y="2198.923"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_131" label="abstract_131">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The routine of a person is defined by the occurrence of activities throughout different days, and can directly affect the persons health. In this work, we address the recognition of routine related days. To do so, we rely on egocentric images, which are recorded by a wearable camera and allow to monitor the life of the user from a first-person view perspective. We propose an unsupervised model that identifies routine related days, following an outlier detection approach. We test the proposed framework over a total of 72 days in the form of photo-streams covering around 2 weeks of the life of 5 different camera wearers. Our model achieves an average of 76% Accuracy and 68% Weighted F-Score for all the users. Thus, we show that our framework is able to recognise routine related days and opens the door to the understanding of the behaviour of people."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2733159008840742"/>
<attvalue for="harmonicclosnesscentrality" value="0.2790432801822335"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.5778024495737098E-4"/>
</attvalues>
<viz:size value="44.174034"/>
<viz:position x="-642.4303" y="12900.5625"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_132" label="abstract_132">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Wearable cameras are become more popular in recent years for capturing the unscripted moments of the first-person that help to analyze the users lifestyle. In this work, we aim to recognize the places related to food in egocentric images during a day to identify the daily food patterns of the first-person. Thus, this system can assist to improve their eating behavior to protect users against food-related diseases. In this paper, we use Siamese Neural Networks to learn the similarity between images from corresponding inputs for one-shot food places classification. We tested our proposed method with MiniEgoFoodPlaces with 15 food related places. The proposed Siamese Neural Networks model with MobileNet achieved an overall classification accuracy of 76.74% and 77.53% on the validation and test sets of the MiniEgoFoodPlaces dataset, respectively outperforming with the base models, such as ResNet50, InceptionV3, and InceptionResNetV2."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27324785260799206"/>
<attvalue for="harmonicclosnesscentrality" value="0.2789293849658326"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.5678466737809775E-4"/>
</attvalues>
<viz:size value="44.12756"/>
<viz:position x="-2535.5054" y="19905.898"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_133" label="abstract_133">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The recognition of food image is an interesting research topic, in which its applicability in the creation of nutritional diaries stands out with the aim of improving the quality of life of people with a chronic disease (e.g. diabetes, heart disease) or prone to acquire it (e.g. people with overweight or obese). For a food recognition system to be useful in real applications, it is necessary to recognize a huge number of different foods. We argue that for very large scale classification, a traditional flat classifier is not enough to acquire an acceptable result. To address this, we propose a method that performs prediction with local classifiers, based on a class hierarchy, or with flat classifier. We decide which approach to use, depending on the analysis of both the Epistemic Uncertainty obtained for the image in the children classifiers and the prediction of the parent classifier. When our criterion is met, the final prediction is obtained with the respective local classifier; otherwise, with the flat classifier. From the results, we can see that the proposed method improves the classification performance compared to the use of a single flat classifier."/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.8359699264325487E-4"/>
</attvalues>
<viz:size value="45.379177"/>
<viz:position x="1482.0714" y="7568.2754"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_134" label="abstract_134">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper proposes an approach to automatically categorize the social interactions of a user wearing a photo-camera (2fpm), by relying solely on what the camera is seeing. The problem is challenging due to the overwhelming complexity of social life and the extreme intra-class variability of social interactions captured under unconstrained conditions. We adopt the formalization proposed in Bugental's social theory, that groups human relations into five social domains with related categories. Our method is a new deep learning architecture that exploits the hierarchical structure of the label space and relies on a set of social attributes estimated at frame level to provide a semantic representation of social interactions. Experimental results on the new EgoSocialRelation dataset demonstrate the effectiveness of our proposal."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.9150689195041056E-4"/>
</attvalues>
<viz:size value="45.748417"/>
<viz:position x="239.57858" y="9324.486"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_135" label="abstract_135">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Mirror symmetry is a property most likely to be encountered in animals than in medium scale vegetation or inanimate objects in the natural world. This might be the reason why the human visual system has evolved to detect it quickly and robustly. Indeed, the perception of symmetry assists higher-level visual processing that are crucial for survival such as target recognition and identification irrespective of position and location. Although the task of detecting symmetrical objects seems effortless to us, it is very challenging for computers (to the extent that it has been proposed as a robust captcha by Funk &amp; Liu in 2016). Indeed, the exact mechanism of symmetry detection in primates is not well understood: fMRI studies have shown that symmetrical shapes activate specific higher-level areas of the visual cortex (Sasaki et al.; 2005) and similarly, a large body of psychophysical experiments suggest that the symmetry perception is critically influenced by low-level mechanisms (Treder; 2010). In this work we attempt to find plausible low-level mechanisms that might form the basis for symmetry perception. Our simple model is made from banks of (i) odd-symmetric Gabors (resembling edge-detecting V1 neurons); and (ii) banks of larger odd- and even-symmetric Gabors (resembling higher visual cortex neurons), that pool signals from the 'edge image'. As reported previously (Akbarinia et al, ECVP2017), the convolution of the symmetrical lines with the two Gabor kernels of alternative phase produces a minimum in one and a maximum in the other (Osorio; 1996), and the rectification and combination of these signals create lines which hint of mirror symmetry in natural images. We improved the algorithm by combining these signals across several spatial scales. Our preliminary results suggest that such multiscale combination of convolutional operations might form the basis for much of the operation of the HVS in terms of symmetry detection and representation."/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="2.0878669970026783E-4"/>
</attvalues>
<viz:size value="46.555054"/>
<viz:position x="5520.5586" y="-1936.0096"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="abstract_136" label="abstract_136">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="A benchmark of saliency models performance with a synthetic image dataset is provided. Model performance is evaluated through saliency metrics as well as the influence of model inspiration and consistency with human psychophysics. SID4VAM is composed of 230 synthetic images, with known salient regions. Images were generated with 15 distinct types of low-level features (e.g. orientation, brightness, color, size...) with a target-distractor popout type of synthetic patterns. We have used Free-Viewing and Visual Search task instructions and 7 feature contrasts for each feature category. Our study reveals that state-ofthe-art Deep Learning saliency models do not perform well with synthetic pattern images, instead, models with Spectral/Fourier inspiration outperform others in saliency metrics and are more consistent with human psychophysical experimentation. This study proposes a new way to evaluate saliency models in the forthcoming literature, accounting for synthetic images with uniquely low-level feature contexts, distinct from previous eye tracking image datasets."/>
<attvalue for="2" value="13"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="1.7709021422187074E-4"/>
</attvalues>
<viz:size value="45.075436"/>
<viz:position x="6640.12" y="-1129.3943"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_137" label="abstract_137">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In this study we present a unifed model of the visual cortex for predicting visual attention using real image scenes. Feedforward mechanisms from RGC and LGN have been functionally modeled using wavelet filters at distinct orientations and scales for each chromatic pathway (Magno-, Parvo-, Konio-cellular) and polarity (ON-/OFF-center), by processing image components in the CIE Lab space. In V1, we process cortical interactions with an excitatory-inhibitory network of fring rate neurons, initially proposed by (Li, 1999), later extended by (Penacchio et al. 2013). Firing rates from models output have been used as predictors of neuronal activity to be projected in a map in superior colliculus (with WTA-like computations), determining locations of visual fxations. These locations will be considered as already visited areas for future saccades, therefore we integrated a spatiotemporal function of inhibition of return mechanisms (where LIP/FEF is responsible) to feed to the model with spatial memory for next saccades. Foveation mechanisms have been simulated with a cortical magnifcation function, which distort spatial viewing properties for each fxation. Results show lower prediction errors than with respect no IoR cases (Fig. 1), and it is functionally consistent with human psychophysical measurements. Our model follows a biologically-constrained architecture, previously shown to reproduce visual saliency (Berga &amp; Otazu, 2018), visual discomfort (Penacchio et al. 2016), brightness (Penacchio et al. 2013) and chromatic induction (Cerda &amp; Otazu, 2016)."/>
<attvalue for="2" value="13"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="1.9663987322797235E-4"/>
</attvalues>
<viz:size value="45.98803"/>
<viz:position x="6121.4985" y="-1028.0747"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_138" label="abstract_138">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Latest computer vision architectures use a chain of feedforward computations, mainly optimizing artificial neural networks for very specific tasks. Although their impressive performance (i.e. in saliency) using real image datasets, these models do not follow several biological principles of the human visual system (e.g. feedback and horizontal connections in cortex) and are unable to predict several visual tasks simultaneously. In this study we present biologically plausible computations from the early stages of the human visual system (i.e. retina and lateral geniculate nucleus) and lateral connections in V1. Despite the simplicity of these processes and without any type of training or optimization, simulations of firing-rate dynamics of V1 are able to predict bottom-up visual attention at distinct contexts (shown previously as well to predict visual discomfort, brightness and chromatic induction). We also show functional top-down selection mechanisms as feedback inhibition projections (i.e. prefrontal cortex for search/task-based attention and parietal area for inhibition of return). Distinct saliency model predictions are tested with eye tracking datasets in free-viewing and visual search tasks, using real images and synthetically-generated patterns. Results on predicting saliency and scanpaths show that artificial models do not outperform biologically-inspired ones (specifically for datasets that lack of common endogenous biases found in eye tracking experimentation), as well as, do not correctly predict contrast sensitivities in pop-out stimulus patterns. This work remarks the importance of considering biological principles of the visual system for building models that reproduce this (and any other) visual effects."/>
<attvalue for="2" value="17"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="1.9663987322797235E-4"/>
</attvalues>
<viz:size value="45.98803"/>
<viz:position x="6202.1523" y="-1015.314"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_139" label="abstract_139">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="A benchmark of saliency models performance with a synthetic image dataset is provided. Model performance is evaluated through saliency metrics as well as the influence of model inspiration and consistency with human psychophysics. SID4VAM is composed of 230 synthetic images, with known salient regions. Images were generated with 15 distinct types of low-level features (e.g. orientation, brightness, color, size...) with a target-distractor pop-out type of synthetic patterns. We have used Free-Viewing and Visual Search task instructions and 7 feature contrasts for each feature category. Our study reveals that state-of-the-art Deep Learning saliency models do not perform well with synthetic pattern images, instead, models with Spectral/Fourier inspiration outperform others in saliency metrics and are more consistent with human psychophysical experimentation. This study proposes a new way to evaluate saliency models in the forthcoming literature, accounting for synthetic images with uniquely low-level feature contexts, distinct from previous eye tracking image datasets."/>
<attvalue for="2" value="13"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="1.7557808429421263E-4"/>
</attvalues>
<viz:size value="45.00485"/>
<viz:position x="6952.899" y="-1269.4777"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_141" label="abstract_141">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="CoRR abs/1905.04073Wearable cameras capture a first-person view of the daily activities of the camera wearer, offering a visual diary of the user behaviour. Detection of the appearance of people the camera user interacts with for social interactions analysis is of high interest. Generally speaking, social events, lifestyle and health are highly correlated, but there is a lack of tools to monitor and analyse them. We consider that egocentric vision provides a tool to obtain information and understand users social interactions. We propose a model that enables us to evaluate and visualize social traits obtained by analysing social interactions appearance within egocentric photostreams. Given sets of egocentric images, we detect the appearance of faces within the days of the camera wearer, and rely on clustering algorithms to group their feature descriptors in order to re-identify persons. Recurrence of detected faces within photostreams allows us to shape an idea of the social pattern of behaviour of the user. We validated our model over several weeks recorded by different camera wearers. Our findings indicate that social profiles are potentially useful for social behaviour interpretation."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.7487865279306678E-4"/>
</attvalues>
<viz:size value="44.9722"/>
<viz:position x="-372.21964" y="11752.879"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_142" label="abstract_142">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Recent studies have shown that the environment where people eat can affect their nutritional behaviour. In this work, we provide automatic tools for a personalised analysis of a person's health habits by the examination of daily recorded egocentric photo-streams. Specifically, we propose a new automatic approach for the classification of food-related environments, that is able to classify up to 15 such scenes. In this way, people can monitor the context around their food intake in order to get an objective insight into their daily eating routine. We propose a model that classifies food-related scenes organized in a semantic hierarchy. Additionally, we present and make available a new egocentric dataset composed of more than 33000 images recorded by a wearable camera, over which our proposed model has been tested. Our approach obtains an accuracy and F-score of 56\% and 65\%, respectively, clearly outperforming the baseline methods."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.6723891019653682E-4"/>
</attvalues>
<viz:size value="44.61557"/>
<viz:position x="-468.25858" y="12091.158"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_143" label="abstract_143">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="CoRR abs/1905.04093Nowadays, there is an upsurge of interest in using lifelogging devices. Such devices generate huge amounts of image data; consequently, the need for automatic methods for analyzing and summarizing these data is drastically increasing. We present a new method for familiar scene recognition in egocentric videos, based on background pattern detection through automatically configurable COSFIRE filters. We present some experiments over egocentric data acquired with the Narrative Clip."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.7487865279306678E-4"/>
</attvalues>
<viz:size value="44.9722"/>
<viz:position x="-456.12808" y="11813.715"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_144" label="abstract_144">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="CoRR abs/1905.04107The availability and use of egocentric data are rapidly increasing due to the growing use of wearable cameras. Our aim is to study the effect (positive, neutral or negative) of egocentric images or events on an observer. Given egocentric photostreams capturing the wearer's days, we propose a method that aims to assign sentiment to events extracted from egocentric photostreams. Such moments can be candidates to retrieve according to their possibility of representing a positive experience for the camera's wearer. The proposed approach obtained a classification accuracy of 75% on the test set, with a deviation of 8%. Our model makes a step forward opening the door to sentiment recognition in egocentric photostreams."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.7487865279306678E-4"/>
</attvalues>
<viz:size value="44.9722"/>
<viz:position x="-421.6325" y="11642.74"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_145" label="abstract_145">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="CoRR abs/1906.00634 Sounds are an important source of information on our daily interactions with objects. For instance, a significant amount of people can discern the temperature of water that it is being poured just by using the sense of hearing. However, only a few works have explored the use of audio for the classification of object interactions in conjunction with vision or as single modality. In this preliminary work, we propose an audio model for egocentric action recognition and explore its usefulness on the parts of the problem (noun, verb, and action classification). Our model achieves a competitive result in terms of verb classification (34.26% accuracy) on a standard benchmark with respect to vision-based state of the art systems, using a comparatively lighter architecture."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.7882527317558884E-4"/>
</attvalues>
<viz:size value="45.15643"/>
<viz:position x="228.18913" y="10154.012"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_146" label="abstract_146">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="CoRR abs/1907.00856Skin lesion segmentation in dermoscopic images is a challenge due to their blurry and irregular boundaries. Most of the segmentation approaches based on deep learning are time and memory consuming due to the hundreds of millions of parameters. Consequently, it is difficult to apply them to real dermatoscope devices with limited GPU and memory resources. In this paper, we propose a lightweight and efficient Generative Adversarial Networks (GAN) model, called MobileGAN for skin lesion segmentation. More precisely, the MobileGAN combines 1D non-bottleneck factorization networks with position and channel attention modules in a GAN model. The proposed model is evaluated on the test dataset of the ISBI 2017 challenges and the validation dataset of ISIC 2018 challenges. Although the proposed network has only 2.35 millions of parameters, it is still comparable with the state-of-the-art. The experimental results show that our MobileGAN obtains comparable performance with an accuracy of 97.61%."/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27338398306140244"/>
<attvalue for="harmonicclosnesscentrality" value="0.2791571753986344"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.5600135725432802E-4"/>
</attvalues>
<viz:size value="44.090996"/>
<viz:position x="-2297.4094" y="20472.15"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_147" label="abstract_147">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Our interaction with the world is an inherently multimodal experience. However, the understanding of human-to-object interactions has historically been addressed focusing on a single modality. In particular, a limited number of works have considered to integrate the visual and audio modalities for this purpose. In this work, we propose a multimodal approach for egocentric action recognition in a kitchen environment that relies on audio and visual information. Our model combines a sparse temporal sampling strategy with a late fusion of audio, spatial, and temporal streams. Experimental results on the EPIC-Kitchens dataset show that multimodal integration leads to better performance than unimodal approaches. In particular, we achieved a 5.18% improvement over the state of the art on verb classification."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.7882527317558884E-4"/>
</attvalues>
<viz:size value="45.15643"/>
<viz:position x="54.193176" y="10165.802"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_148" label="abstract_148">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Social networks have attracted the attention of psychologists, as the behavior of users can be used to assess personality traits, and to detect sentiments and critical mental situations such as depression or suicidal tendencies. Recently, the increasing amount of image uploads to social networks has shifted the focus from text to image-based personality assessment. However, obtaining the ground-truth requires giving personality questionnaires to the users, making the process very costly and slow, and hindering research on large populations. In this paper, we demonstrate that it is possible to predict which images are most associated with each personality trait of the OCEAN personality model, without requiring ground-truth personality labels. Namely, we present a weakly supervised framework which shows that the personality scores obtained using specific images textually associated with particular personality traits are highly correlated with scores obtained using standard text-based personality questionnaires. We trained an OCEAN trait model based on Convolutional Neural Networks (CNNs), learned from 120K pictures posted with specific textual hashtags, to infer whether the personality scores from the images uploaded by users are consistent with those scores obtained from text. In order to validate our claims, we performed a personality test on a heterogeneous group of 280 human subjects, showing that our model successfully predicts which kind of image will match a person with a given level of a trait. Looking at the results, we obtained evidence that personality is not only correlated with text, but with image content too. Interestingly, different visual patterns emerged from those images most liked by persons with a particular personality trait: for instance, pictures most associated with high conscientiousness usually contained healthy food, while low conscientiousness pictures contained injuries, guns, and alcohol. These findings could pave the way to complement text-based personality questionnaires with image-based questions."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2736566512903628"/>
<attvalue for="harmonicclosnesscentrality" value="0.27961275626423804"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="1.4355547985917948E-4"/>
</attvalues>
<viz:size value="43.51001"/>
<viz:position x="-3047.7153" y="-4315.539"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_149" label="abstract_149">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Autonomous vehicles are now considered as an assured asset in the future. Literally, all the relevant car-markers are now in a race to produce fully autonomous vehicles. These car-makers usually make use of modular pipelines for designing autonomous vehicles. This strategy decomposes the problem in a variety of tasks such as object detection and recognition, semantic and instance segmentation, depth estimation, SLAM and place recognition, as well as planning and control. Each module requires a separate set of expert algorithms, which are costly specially in the amount of human labor and necessity of data labelling. An alternative, that recently has driven considerable interest, is the end-to-end driving. In the end-to-end driving paradigm, perception and control are learned simultaneously using a deep network. These sensorimotor models are typically obtained by imitation learning fromhuman demonstrations. The main advantage is that this approach can directly learn from large fleets of human-driven vehicles without requiring a fixed ontology and extensive amounts of labeling. However, scaling end-to-end driving methods to behaviors more complex than simple lane keeping or lead vehicle following remains an open problem. On this thesis, in order to achieve more complex behaviours, weaddress some issues when creating end-to-end driving system through imitationlearning. The first of themis a necessity of an environment for algorithm evaluation and collection of driving demonstrations. On this matter, we participated on the creation of the CARLA simulator, an open source platformbuilt from ground up for autonomous driving validation and prototyping. Since the end-to-end approach is purely reactive, there is also the necessity to provide an interface with a global planning system. With this, we propose the conditional imitation learning that conditions the actions produced into some high level command. Evaluation is also a concern and is commonly performed by comparing the end-to-end network output to some pre-collected driving dataset. We show that this is surprisingly weakly correlated to the actual driving and propose strategies on how to better acquire data and a better comparison strategy. Finally, we confirmwell-known generalization issues(due to dataset bias and overfitting), new ones (due to dynamic objects and thelack of a causal model), and training instability; problems requiring further research before end-to-end driving through imitation can scale to real-world driving."/>
<attvalue for="2" value="19"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="2.6616096533964875E-4"/>
</attvalues>
<viz:size value="49.233322"/>
<viz:position x="3468.8982" y="1193.3807"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_150" label="abstract_150">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Anticipating the intentions of vulnerable road users (VRUs) such as pedestriansand cyclists can be critical for performing safe and comfortable driving maneuvers. This is the case for human driving and, therefore, should be taken into account by systems providing any level of driving assistance, i.e. from advanced driver assistant systems (ADAS) to fully autonomous vehicles (AVs). In this PhD work, we show how the latest advances on monocular vision-based human pose estimation, i.e. those relying on deep Convolutional Neural Networks (CNNs), enable to recognize the intentions of such VRUs. In the case of cyclists, we assume that they follow the established traffic codes to indicate future left/right turns and stop maneuvers with arm signals. In the case of pedestrians, no indications can be assumed a priori. Instead, we hypothesize that the walking pattern of a pedestrian can allow us to determine if he/she has the intention of crossing the road in the path of the egovehicle, so that the ego-vehicle must maneuver accordingly (e.g. slowing down or stopping). In this PhD work, we show how the same methodology can be used for recognizing pedestrians and cyclists intentions. For pedestrians, we perform experiments on the publicly available Daimler and JAAD datasets. For cyclists, we did not found an analogous dataset, therefore, we created our own one by acquiringand annotating corresponding video-sequences which we aim to share with theresearch community. Overall, the proposed pipeline provides new state-of-the-art results on the intention recognition of VRUs."/>
<attvalue for="2" value="17"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="2.7281245532583627E-4"/>
</attvalues>
<viz:size value="49.54382"/>
<viz:position x="3172.1006" y="1926.7737"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_151" label="abstract_151">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In this thesis we explore information Extraction from totally or partially handwritten documents. Basically we are dealing with two different application scenarios. The first scenario are modern highly structured documents like forms. In this kind of documents, the semantic information is encoded in different fields with a pre-defined location in the document, therefore, information extraction becomes roughly equivalent to transcription. The second application scenario are loosely structured totally handwritten documents, besides transcribing them, we need to assign a semantic label, from a set of known values to the handwritten words. In both scenarios, transcription is an important part of the information extraction. For that reason in this thesis we present two methods based on Neural Networks, to transcribe handwritten text.In order to tackle the challenge of loosely structured documents, we have produced a benchmark, consisting of a dataset, a defined set of tasks and a metric, that was presented to the community as an international competition. Also, we propose different models based on Convolutional and Recurrent neural networks that are able to transcribe and assign different semantic labels to each handwritten words, that is, able to perform Information Extraction."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="2.4285276899852387E-4"/>
</attvalues>
<viz:size value="48.14528"/>
<viz:position x="-149.52074" y="155.2068"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="abstract_152" label="abstract_152">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Humansmove their eyes in order to learn visual representations of the world. These eye movements depend on distinct factors, either by the scene that we perceive or by our own decisions. To select what is relevant to attend is part of our survival mechanisms and the way we build reality, as we constantly react both consciously and unconsciously to all the stimuli that is projected into our eyes. In this thesis we try to explain (1) how we move our eyes, (2) how to build machines that understand visual information and deploy eyemovements, and (3) how to make these machines understand tasks in order to decide for eye movements.(1) We provided the analysis of eye movement behavior elicited by low-level feature distinctiveness with a dataset of 230 synthetically-generated image patterns. A total of 15 types of stimuli has been generated (e.g. orientation, brightness, color, size, etc.), with 7 feature contrasts for each feature category. Eye-tracking data was collected from 34 participants during the viewing of the dataset, using Free-Viewing and Visual Search task instructions. Results showed that saliency is predominantly and distinctively influenced by: 1. feature type, 2. feature contrast, 3. Temporality of fixations, 4. task difficulty and 5. center bias. From such dataset (SID4VAM), we have computed a benchmark of saliency models by testing performance using psychophysical patterns. Model performance has been evaluated considering model inspiration and consistency with human psychophysics. Our study reveals that state-of-the-art Deep Learning saliency models do not performwell with synthetic pattern images, instead, modelswith Spectral/Fourier inspiration outperform others in saliency metrics and are more consistent with human psychophysical experimentation.(2) Computations in the primary visual cortex (area V1 or striate cortex) have long been hypothesized to be responsible, among several visual processing mechanisms, of bottom-up visual attention (also named saliency). In order to validate this hypothesis, images from eye tracking datasets have been processed with a biologically plausible model of V1 (named Neurodynamic SaliencyWaveletModel or NSWAM). Following Lis neurodynamic model, we define V1s lateral connections with a network of firing rate neurons, sensitive to visual features such as brightness, color, orientation and scale. Early subcortical processes (i.e. retinal and thalamic) are functionally simulated. The resulting saliency maps are generated from the model output, representing the neuronal activity of V1 projections towards brain areas involved in eye movement control. We want to pinpoint that our unified computational architecture is able to reproduce several visual processes (i.e. brightness, chromatic induction and visual discomfort) without applying any type of training or optimization and keeping the same parametrization. The model has been extended (NSWAM-CM) with an implementation of the cortical magnification function to define the retinotopical projections towards V1, processing neuronal activity for each distinct view during scene observation. Novel computational definitions of top-down inhibition (in terms of inhibition of return and selection mechanisms), are also proposed to predict attention in Free-Viewing and Visual Search conditions. Results show that our model outperforms other biologically-inpired models of saliency prediction as well as to predict visual saccade sequences, specifically for nature and synthetic images. 
We also show how temporal and spatial characteristics of inhibition of return can improve prediction of saccades, as well as how distinct search strategies (in terms of feature-selective or category-specific inhibition) predict attention at distinct image contexts.(3) Although previous scanpath models have been able to efficiently predict saccades during Free-Viewing, it is well known that stimulus and task instructions can strongly affect eye movement patterns. In particular, task priming has been shown to be crucial to the deployment of eye movements, involving interactions between brain areas related to goal-directed behavior, working and long-termmemory in combination with stimulus-driven eyemovement neuronal correlates. In our latest study we proposed an extension of the Selective Tuning Attentive Reference Fixation ControllerModel based on task demands (STAR-FCT), describing novel computational definitions of Long-TermMemory, Visual Task Executive and Task Working Memory. With these modules we are able to use textual instructions in order to guide the model to attend to specific categories of objects and/or places in the scene. We have designed our memorymodel by processing a visual hierarchy of low- and high-level features. The relationship between the executive task instructions and the memory representations has been specified using a tree of semantic similarities between the learned features and the object category labels. Results reveal that by using this model, the resulting object localizationmaps and predicted saccades have a higher probability to fall inside the salient regions depending on the distinct task instructions compared to saliency."/>
<attvalue for="2" value="41"/>
<attvalue for="degree" value="42"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="3"/>
<attvalue for="pageranks" value="2.2494663706652408E-4"/>
</attvalues>
<viz:size value="47.30941"/>
<viz:position x="5948.482" y="-715.8591"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_153" label="abstract_153">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The image sensor, nowadays, is rolling the smartphone industry. While some phone brands explore equipping more image sensors, others, like Google, maintain their smartphones with just one sensor; but this sensor is equipped with Deep Learning to enhance the image quality. However, what all brands agree on is the need to research new image sensors; for instance, in 2015 Omnivision and PixelTeq presented new CMOS based image sensors defined as multispectral Single Sensor Camera (SSC), which are capable of capturing multispectral bands. This dissertation presents the benefits of using a multispectral SSCs that, as aforementioned, simultaneously acquires images in the visible and near-infrared (NIR) bands. The principal benefits while addressing problems related to image bands in the spectral range of 400 to 1100 nanometers, there are cost reductions in the hardware and software setup because only one SSC is needed instead of two, and the images alignment are not required any more. Concerning to the NIR spectrum, many works in literature have proven the benefits of working with NIR to enhance RGB images (e.g., image enhancement, remove shadows, dehazing, etc.). In spite of the advantage of using SSC (e.g., low latency), there are some drawback to be solved. One of this drawback corresponds to the nature of the silicon-based sensor, which in addition to capture the RGB image, when the infrared cut off filter is not installed it also acquires NIR information into the visible image. This phenomenon is called RGB and NIR crosstalking. This thesis firstly faces this problem in challenging images and then it shows the benefit of using multispectral images in the edge detection task.The RGB color restoration from RGBN image is the topic tackled in RGB and NIR crosstalking. Even though in the literature a set of processes have been proposed to face this issue, in this thesis novel approaches, based on DL, are proposed to subtract the additional NIR included in the RGB channel. More precisely, an Artificial Neural Network (NN) and two Convolutional Neural Network (CNN) models are proposed. As the DL based models need a dataset with a large collection of image pairs, a large dataset is collected to address the color restoration. The collected images are from challenging scenes where the sunlight radiation is sufficient to give absorption/reflectance properties to the considered scenes. An extensive evaluation has been conducted on the CNN models, differences from most of the restored images are almost imperceptible to the human eye. The next proposal of the thesis is the validation of the usage of SSC images in the edge detection task. Three methods based on CNN have been proposed. While the first one is based on the most used model, holistically-nested edge detection (HED) termed as multispectral HED (MS-HED), the other two have been proposed observing the drawbacks of MS-HED. These two novel architectures have been designed from scratch (training from scratch); after the first architecture is validated in the visible domain a slight redesign is proposed to tackle the multispectral domain. Again, another dataset is collected to face this problem with SSCs. Even though edge detection is confronted in the multispectral domain, its qualitative and quantitative evaluation demonstrates the generalization in other datasets used for edge detection, improving state-of-the-art results."/>
<attvalue for="2" value="28"/>
<attvalue for="degree" value="29"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="2.3498226760259485E-4"/>
</attvalues>
<viz:size value="47.777878"/>
<viz:position x="282.30444" y="-9089.3"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="abstract_154" label="abstract_154">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Lung cancer is one of the most diagnosed cancers among men and women. Actually,lung cancer accounts for 13% of the total cases with a 5-year global survivalrate in patients. Although Early detection increases survival rate from 38% to 67%, accurate diagnosis remains a challenge. Pathological confirmation requires extracting a sample of the lesion tissue for its biopsy. The preferred procedure for tissue biopsy is called bronchoscopy. A bronchoscopy is an endoscopic technique for the internal exploration of airways which facilitates the performance of minimal invasive interventions with low risk for the patient. Recent advances in bronchoscopic devices have increased their use for minimal invasive diagnostic and intervention procedures, like lung cancer biopsy sampling. Despite the improvement in bronchoscopic device quality, there is a lack of intelligent computational systems for supporting in-vivo clinical decision during examinations. Existing technologies fail to accurately reach the lesion due to several aspects at intervention off-line planning and poor intra-operative guidance at exploration time. Existing guiding systems radiate patients and clinical staff,might be expensive and achieve a suboptimlal 70% of yield boost. Diagnostic yield could be improved reducing radiation and costs by developing intra-operative support systems able to guide the bronchoscopist to the lesion during the intervention. The goal of this PhD thesis is to develop an image-based navigation systemfor intra-operative guidance of bronchoscopists to a target lesion across a path previously planned on a CT-scan. We propose a 3D navigation system which uses the anatomy of video bronchoscopy frames to locate the bronchoscope within the airways. Once the bronchoscope is located, our navigation system is able to indicate the bifurcation which needs to be followed to reach the lesion. In order to facilitate an off-line validationas realistic as possible, we also present a method for augmenting simulated virtual bronchoscopies with the appearance of intra-operative videos. Experiments performed on augmented and intra-operative videos, prove that our algorithm can be speeded up for an on-line implementation in the operating room."/>
<attvalue for="2" value="17"/>
<attvalue for="degree" value="18"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27270468381165364"/>
<attvalue for="harmonicclosnesscentrality" value="0.2780182232346253"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="4.557573547843128E-4"/>
</attvalues>
<viz:size value="58.08382"/>
<viz:position x="198.22917" y="1021.195"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="abstract_155" label="abstract_155">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In the current work, we identify several problems of current tracking systems. The lack of large-scale labeled datasets hampers the usage of deep learning, especially end-to-end training, for tracking in TIR images. Therefore, many methods for tracking on TIR data are still based on hand-crafted features. This situation also happens in multi-modal tracking, e.g. RGB-T tracking. Another reason, which hampers the development of RGB-T tracking, is that there exists little research on the fusion mechanisms for combining information from RGB and TIR modalities. One of the crucial components of most trackers is the update module. For the currently existing end-to-end tracking architecture, e.g, Siamese trackers, the online model update is still not taken into consideration at the training stage. They use no-update or a linear update strategy during the inference stage. While such a hand-crafted approach to updating has led to improved results, its simplicity limits the potential gain likely to be obtained by learning to update.To address the data-scarcity for TIR and RGB-T tracking, we use image-to-image translation to generate a large-scale synthetic TIR dataset. This dataset allows us to perform end-to-end training for TIR tracking. Furthermore, we investigate several fusion mechanisms for RGB-T tracking. The multi-modal trackers are also trained in an end-to-end manner on the synthetic data. To improve the standard online update, we pose the updating step as an optimization problem which can be solved by training a neural network. Our approach thereby reduces the hand-crafted components in the tracking pipeline and sets a further step in the direction of a complete end-to-end trained tracking network which also considers updating during optimization."/>
<attvalue for="2" value="19"/>
<attvalue for="degree" value="20"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="2.2388589655756812E-4"/>
</attvalues>
<viz:size value="47.259895"/>
<viz:position x="6188.4697" y="7032.929"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_156" label="abstract_156">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="One of the fundamental problems of computer vision is to represent images with compact semantically relevant embeddings. These embeddings could then be used in a wide variety of applications, such as image retrieval, object detection, and video search. The main objective of this thesis is to study image embeddings from two aspects: color embeddings and deep embeddings.In the first part of the thesis we start from hand-crafted color embeddings. We propose a method to order the additional color names according to their complementary nature with the basic eleven color names. This allows us to compute color name representations with high discriminative power of arbitrary length. Psychophysical experiments confirm that our proposed method outperforms baseline approaches. Secondly, we learn deep color embeddings from weakly labeled data by adding an attention strategy. The attention branch is able to correctly identify the relevant regions for each class. The advantage of our approach is that it can learn color names for specific domains for which no pixel-wise labels exists.In the second part of the thesis, we focus on deep embeddings. Firstly, we address the problem of compressing large embedding networks into small networks, while maintaining similar performance. We propose to distillate the metrics from a teacher network to a student network. Two new losses are introduced to model the communication of a deep teacher network to a small student network: one based on an absolute teacher, where the student aims to produce the same embeddings as the teacher, and one based on a relative teacher, where the distances between pairs of data points is communicated from the teacher to the student. In addition, various aspects of distillation have been investigated for embeddings, including hint and attention layers, semi-supervised learning and cross quality distillation. Finally, another aspect of deep metric learning, namely lifelong learning, is studied. We observed some drift occurs during training of new tasks for metric learning. A method to estimate the semantic drift based on the drift which is experienced by data of the current task during its training is introduced. Having this estimation, previous tasks can be compensated for this drift, thereby improving their performance. Furthermore, we show that embedding networks suffer significantly less from catastrophic forgetting compared to classification networks when learning new tasks."/>
<attvalue for="2" value="21"/>
<attvalue for="degree" value="22"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="2.2145232705905762E-4"/>
</attvalues>
<viz:size value="47.146294"/>
<viz:position x="5523.7983" y="3523.464"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_157" label="abstract_157">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Counterfeiting and piracy are a form of theft that has been steadily growing in recent years. A counterfeit is an unauthorized reproduction of an authentic/genuine object. Banknotes and identity documents are two common objects of counterfeiting. The former is used by organized criminal groups to finance a variety of illegal activities or even to destabilize entire countries due the inflation effect. Generally, in order to run their illicit businesses, counterfeiters establish companies and bank accounts using fraudulent identity documents. The illegal activities generated by counterfeit banknotes and identity documents has a damaging effect on business, the economy and the general population. To fight against counterfeiters, governments and authorities around the globe cooperate and develop security features to protect their security documents. Many of the security features in identity documents can also be found in banknotes. In this dissertation we focus our efforts in detecting the counterfeit banknotes and identity documents by analyzing the security features at the background printing. Background areas on secure documents contain fine-line patterns and designs that are difficult to reproduce without the manufacturers cutting-edge printing equipment. Our objective is to find the loose of resolution between the genuine security document and the printed counterfeit version with a publicly available commercial printer. We first present the most complete survey to date in identity and banknote security features. The compared algorithms and systems are based on computer vision and machine learning. Then we advance to present the banknote and identity counterfeit dataset we have built and use along all this thesis. Afterwards, we evaluate and adapt algorithms in the literature for the security background texture analysis. We study this problem from the point of view of robustness, computational efficiency and applicability into a real and non-controlled industrial scenario, proposing key insights to use these algorithms. Next, within the industrial environment of this thesis, we build a complete service oriented architecture to detect counterfeit documents. The mobile application and the server framework intends to be used even by non-expert document examiners to spot counterfeits. Later, we re-frame the problem of background texture counterfeit detection as a full-reference game of spotting the differences, by alternating glimpses between a counterfeit and a genuine background using recurrent neural networks. Finally, we deal with the lack of counterfeit samples, studying different approaches based on anomaly detection."/>
<attvalue for="2" value="21"/>
<attvalue for="degree" value="22"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="2.5805966241210145E-4"/>
</attvalues>
<viz:size value="48.85515"/>
<viz:position x="2113.0068" y="364.79547"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_158" label="abstract_158">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Deep convolutional neural networks (CNNs) have achieved superior performance in many visual recognition application, such as image classification, detection and segmentation. In this thesis we address two limitations of CNNs. Training deep CNNs requires huge amounts of labeled data, which is expensive and labor intensive to collect. Another limitation is that training CNNs in a continual learning setting is still an open research question. Catastrophic forgetting is very likely when adapting trained models to new environments or new tasks. Therefore, in this thesis, we aim to improve CNNs for applications with limited data and to adapt CNNs continually to new tasks.Self-supervised learning leverages unlabelled data by introducing an auxiliary task for which data is abundantly available. In the first part of the thesis, we show how rankings can be used as a proxy self-supervised task for regression problems. Then we propose an efficient backpropagation technique for Siamese networks which prevents the redundant computation introduced by the multi-branch network architecture. In addition, we show that measuring network uncertainty on the self-supervised proxy task is a good measure of informativeness of unlabeled data. This can be used to drive an algorithm for active learning. We then apply our framework on two regression problems: Image Quality Assessment (IQA) and Crowd Counting. For both, we show how to automatically generate ranked image sets from unlabeled data. Our results show that networks trained to regress to the ground truth targets for labeled data and to simultaneously learn to rank unlabeled data obtain significantly better, state-of-the-art results. We further show that active learning using rankings can reduce labeling effort by up to 50\% for both IQA and crowd counting.In the second part of the thesis, we propose two approaches to avoiding catastrophic forgetting in sequential task learning scenarios. The first approach is derived from Elastic Weight Consolidation, which uses a diagonal Fisher Information Matrix (FIM) to measure the importance of the parameters of the network. However the diagonal assumption is unrealistic. Therefore, we approximately diagonalize the FIM using a set of factorized rotation parameters. This leads to significantly better performance on continual learning of sequential tasks. For the second approach, we show that forgetting manifests differently at different layers in the network and propose a hybrid approach where distillation is used in the feature extractor and replay in the classifier via feature generation. Our method addresses the limitations of generative image replay and probability distillation (i.e. learning without forgetting) and can naturally aggregate new tasks in a single, well-calibrated classifier. Experiments confirm that our proposed approach outperforms the baselines and some start-of-the-art methods."/>
<attvalue for="2" value="26"/>
<attvalue for="degree" value="27"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="2.2555348886232638E-4"/>
</attvalues>
<viz:size value="47.337738"/>
<viz:position x="2289.9102" y="3300.8113"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_159" label="abstract_159">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Image generation is arguably one of the most attractive, compelling, and challenging tasks in computer vision. Among the methods which perform image generation, generative adversarial networks (GANs) play a key role. The most common image generation models based on GANs can be divided into two main approaches. The first one, called simply image generation takes random noise as an input and synthesizes an image which follows the same distribution as the images in the training set. The second class, which is called image-to-image translation, aims to map an image from a source domain to one that is indistinguishable from those in the target domain. Image-to-image translation methods can further be divided into paired and unpaired image-to-image translation based on whether they require paired data or not. In this thesis, we aim to address some challenges of both image generation and image-to-image generation.GANs highly rely upon having access to vast quantities of data, and fail to generate realistic images from random noise when applied to domains with few images. To address this problem, we aim to transfer knowledge from a model trained on a large dataset (source domain) to the one learned on limited data (target domain). We find that both GANs andconditional GANs can benefit from models trained on large datasets. Our experiments show that transferring the discriminator is more important than the generator. Using both the generator and discriminator results in the best performance. We found, however, that this method suffers from overfitting, since we update all parameters to adapt to the target data. We propose a novel architecture, which is tailored to address knowledge transfer to very small target domains. Our approach effectively exploreswhich part of the latent space is more related to the target domain. Additionally, the proposed method is able to transfer knowledge from multiple pretrained GANs. Although image-to-image translation has achieved outstanding performance, it still facesseveral problems. First, for translation between complex domains (such as translations between different modalities) image-to-image translation methods require paired data. We show that when only some of the pairwise translations have been seen (i.e. during training), we can infer the remaining unseen translations (where training pairs are not available). We propose a new approach where we align multiple encoders and decoders in such a way that the desired translation can be obtained by simply cascadingthe source encoder and the target decoder, even when they have not interacted during the training stage (i.e. unseen). Second, we address the issue of bias in image-to-image translation. Biased datasets unavoidably contain undesired changes, which are dueto the fact that the target dataset has a particular underlying visual distribution. We use carefully designed semantic constraints to reduce the effects of the bias. The semantic constraint aims to enforce the preservation of desired image properties. Finally, current approaches fail to generate diverse outputs or perform scalable image transfer in a single model. To alleviate this problem, we propose a scalable and diverse image-to-image translation. We employ random noise to control the diversity. The scalabitlity is determined by conditioning the domain label.computer vision, deep learning, imitation learning, adversarial generative networks, image generation, image-to-image translation."/>
<attvalue for="2" value="34"/>
<attvalue for="degree" value="35"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="2.1227982191096665E-4"/>
</attvalues>
<viz:size value="46.718113"/>
<viz:position x="6398.792" y="6959.9023"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_160" label="abstract_160">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Medical image fusion plays an important role in the clinical diagnosis of several critical neurological diseases by merging complementary information available in multimodal images. In this paper, a novel CT-MR neurological image fusion framework is proposed using an optimized biologically inspired feedforward neural model in two-scale hybrid 10 decomposition domain using gray wolf optimization to preserve the structural as well as texture information present in source CT and MR images. Initially, the source images are subjected to two-scale 10 decomposition with optimized parameters, giving a scale-1 detail layer, a scale-2 detail layer and a scale-2 base layer. Two detail layers at scale-1 and 2 are fused using an optimized biologically inspired neural model and weighted average scheme based on local energy and modified spatial frequency to maximize the preservation of edges and local textures, respectively, while the scale-2 base layer gets fused using choose max rule to preserve the background information. To optimize the hyper-parameters of hybrid 10 decomposition and biologically inspired neural model, a fitness function is evaluated based on spatial frequency and edge index of the resultant fused image obtained by adding all the fused components. The fusion performance is analyzed by conducting extensive experiments on different CT-MR neurological images. Experimental results indicate that the proposed method provides better-fused images and outperforms the other state-of-the-art fusion methods in both visual and quantitative assessments."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.8981695761714334E-4"/>
</attvalues>
<viz:size value="45.669533"/>
<viz:position x="-323.9313" y="9660.453"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_161" label="abstract_161">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="The problem of dealing with missing or incomplete data in machine learning and computer vision arises in many applications. Recent strategies make use of generative models to impute missing or corrupted data. Advances in computer vision using deep generative models have found applications in image/video processing, such as denoising, restoration, super-resolution, or inpainting. Inpainting and Denoising Challenges comprises recent efforts dealing with image and video inpainting tasks. This includes winning solutions to the ChaLearn Looking at People inpainting and denoising challenges: human pose recovery, video de-captioning and fingerprint restoration. This volume starts with a wide review on image denoising, retracing and comparing various methods from the pioneer signal processing methods, to machine learning approaches with sparse and low-rank models, and recent deep learning architectures with autoencoders and variants. The following chapters present results from the Challenge, including three competition tasks at WCCI and ECML 2018. The top best approaches submitted by participants are described, showing interesting contributions and innovating methods. The last two chapters propose novel contributions and highlight new applications that benefit from image/video inpainting."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.5702615220233586E-4"/>
</attvalues>
<viz:size value="44.138832"/>
<viz:position x="-13362.676" y="320.9956"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_162" label="abstract_162">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Perceiving text is crucial to understand semantics of outdoor scenes and hence is a critical requirement to build intelligent systems for driver assistance and self-driving. Most of the existing datasets for text detection and recognition comprise still images and are mostly compiled keeping text in mind. This paper introduces a new RoadText-1K dataset for text in driving videos. The dataset is 20 times larger than the existing largest dataset for text in videos. Our dataset comprises 1000 video clips of driving without any bias towards text and with annotations for text bounding boxes and transcriptions in every frame. State of the art methods for text detection,recognition and tracking are evaluated on the new dataset and the results signify the challenges in unconstrained driving videos compared to existing datasets. This suggests that RoadText-1K is suited for research and development of reading systems, robust enough to be incorporated into more complex downstream tasks like driver assistance and self-driving. The dataset can be found at http://cvit.iiit.ac.in/research/projects/cvit-projects/roadtext-1k"/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.5039821621441022E-4"/>
</attvalues>
<viz:size value="43.829433"/>
<viz:position x="-5690.3535" y="8419.388"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_163" label="abstract_163">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Finding a person across a camera network plays an important role in video surveillance. For a real-world person re-identification application, in order to guarantee an optimal time response, it is crucial to find the balance between accuracy and speed. We analyse this trade-off, comparing a classical method, that comprises hand-crafted feature description and metric learning, in particular, LOMO and XQDA, to deep learning based techniques, using image classification networks, ResNet and MobileNets. Additionally, we propose and analyse network distillation as a learning strategy to reduce the computational cost of the deep learning approach at test time. We evaluate both methods on the Market-1501 and DukeMTMC-reID large-scale datasets, showing that distillation helps reducing the computational cost at inference time while even increasing the accuracy performance."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2733159008840742"/>
<attvalue for="harmonicclosnesscentrality" value="0.2790432801822335"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.5887313232865272E-4"/>
</attvalues>
<viz:size value="44.22505"/>
<viz:position x="3790.4258" y="299.06735"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_164" label="abstract_164">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In this work we contribute a novel pipeline to automatically generate training data, and to improve over state-of-the-art multi-object tracking and segmentation (MOTS) methods. Our proposed track mining algorithm turns raw street-level videos into high-fidelity MOTS training data, is scalable and overcomes the need of expensive and time-consuming manual annotation approaches. We leverage state-of-the-art instance segmentation results in combination with optical flow predictions, also trained on automatically harvested training data. Our second major contribution is MOTSNet - a deep learning, tracking-by-detection architecture for MOTS - deploying a novel mask-pooling layer for improved object association over time. Training MOTSNet with our automatically extracted data leads to significantly improved sMOTSA scores on the novel KITTI MOTS dataset (+1.9%/+7.5% on cars/pedestrians), and MOTSNet improves by +4.1% over previously best methods on the MOTSChallenge dataset. Our most impressive finding is that we can improve over previous best-performing works, even in complete absence of manually annotated MOTS training data."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.992381091608182E-4"/>
</attvalues>
<viz:size value="46.109318"/>
<viz:position x="3624.8901" y="-8552.34"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_165" label="abstract_165">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="BACKGROUND:Content-based image retrieval (CBIR) is an application of machine learning used to retrieve images by similarity on the basis of features. Our objective was to develop a CBIR system that could identify images containing the same polyp ('polyp fingerprint').METHODS:A machine learning technique called Bag of Words was used to describe each endoscopic image containing a polyp in a unique way. The system was tested with 243 white light images belonging to 99 different polyps (for each polyp there were at least two images representing it in two different temporal moments). Images were acquired in routine colonoscopies at Hospital Clnic using high-definition Olympus endoscopes. The method provided for each image the closest match within the dataset.RESULTS:The system matched another image of the same polyp in 221/243 cases (91%). No differences were observed in the number of correct matches according to Paris classification (protruded: 90.7% vs. non-protruded: 91.3%) and size (&lt;10 mm: 91.6% vs.>10 mm: 90%).CONCLUSIONS:A CBIR system can match accurately two images containing the same polyp, which could be a helpful aid for polyp image recognition.KEYWORDS:Artificial intelligence; Colorectal polyps; Content-based image retrieval"/>
<attvalue for="2" value="15"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2731798382078407"/>
<attvalue for="harmonicclosnesscentrality" value="0.27881548974943166"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="1.5718616205002511E-4"/>
</attvalues>
<viz:size value="44.1463"/>
<viz:position x="9136.658" y="-18485.45"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_166" label="abstract_166">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Computer-aided diagnosis (CAD) is a tool with great potential to help endoscopists in the tasks of detecting and histologically classifying colorectal polyps. In recent years, different technologies have been described and their potential utility has been increasingly evidenced, which has generated great expectations among scientific societies. However, most of these works are retrospective and use images of different quality and characteristics which are analysed off line. This review aims to familiarise gastroenterologists with computational methods and the particularities of endoscopic imaging, which have an impact on image processing analysis. Finally, the publicly available image databases, needed to compare and confirm the results obtained with different methods, are presented."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="16"/>
<attvalue for="pageranks" value="1.6138403863033617E-4"/>
</attvalues>
<viz:size value="44.342262"/>
<viz:position x="8763.054" y="-18206.01"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_167" label="abstract_167">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="On-board vision systems may need to increase the number of classes that can be recognized in a relatively short period. For instance, a traffic sign recognition system may suddenly be required to recognize new signs. Since collecting and annotating samples of such new classes may need more time than we wish, especially for uncommon signs, we propose a method to generate these samples by combining synthetic images and Generative Adversarial Network (GAN) technology. In particular, the GAN is trained on synthetic and real-world samples from known classes to perform synthetic-to-real domain adaptation, but applied to synthetic samples of the new classes. Using the Tsinghua dataset with a synthetic counterpart, SYNTHIA-TS, we have run an extensive set of experiments. The results show that the proposed method is indeed effective, provided that we use a proper Convolutional Neural Network (CNN) to perform the traffic sign recognition (classification) task as well as a proper GAN to transform the synthetic images. Here, a ResNet101-based classifier and domain adaptation based on CycleGAN performed extremely well for a ratio 1/4 for new/known classes; even for more challenging ratios such as 4/1, the results are also very positive."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="1"/>
<attvalue for="pageranks" value="1.8645289024160644E-4"/>
</attvalues>
<viz:size value="45.512493"/>
<viz:position x="5022.28" y="2601.9722"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_168" label="abstract_168">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Explainability and interpretability are two critical aspects of decision support systems. Despite their importance, it is only recently that researchers are starting to explore these aspects. This paper provides an introduction to explainability and interpretability in the context of apparent personality recognition. To the best of our knowledge, this is the first effort in this direction. We describe a challenge we organized on explainability in first impressions analysis from video. We analyze in detail the newly introduced data set, evaluation protocol, proposed solutions and summarize the results of the challenge. We investigate the issue of bias in detail. Finally, derived from our study, we outline research opportunities that we foresee will be relevant in this area in the near future."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2733159008840742"/>
<attvalue for="harmonicclosnesscentrality" value="0.2790432801822335"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.5044767246409014E-4"/>
</attvalues>
<viz:size value="43.831745"/>
<viz:position x="-14325.861" y="-808.01514"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_169" label="abstract_169">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Nutrition and social interactions are both key aspects of the daily lives of humans. In this work, we propose a system to evaluate the influence of social interaction in the nutritional habits of a person from a first-person perspective. In order to detect the routine of an individual, we construct a nutritional behaviour pattern discovery model, which outputs routines over a number of days. Our method evaluates similarity of routines with respect to visited food-related scenes over the collected days, making use of Dynamic Time Warping, as well as considering social engagement and its correlation with food-related activities. The nutritional and social descriptors of the collected days are evaluated and encoded using an LSTM Autoencoder. Later, the obtained latent space is clustered to find similar days unaffected by outliers using the Isolation Forest method. Moreover, we introduce a new score metric to evaluate the performance of the proposed algorithm. We validate our method on 104 days and more than 100 k egocentric images gathered by 7 users. Several different visualizations are evaluated for the understanding of the findings. Our results demonstrate good performance and applicability of our proposed model for social-related nutritional behaviour understanding. At the end, relevant applications of the model are discussed by analysing the discovered routine of particular individuals."/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.880073321940531E-4"/>
</attvalues>
<viz:size value="45.585056"/>
<viz:position x="68.30383" y="10589.438"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_170" label="abstract_170">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Recurrent neural networks (RNN) are popular for many computer vision tasks, including multi-label classification. Since RNNs produce sequential outputs, labels need to be ordered for the multi-label classification task. Current approaches sort labels according to their frequency, typically ordering them in either rare-first or frequent-first. These imposed orderings do not take into account that the natural order to generate the labels can change for each image, e.g.\ first the dominant object before summing up the smaller objects in the image. Therefore, in this paper, we propose ways to dynamically order the ground truth labels with the predicted label sequence. This allows for the faster training of more optimal LSTM models for multi-label classification. Analysis evidences that our method does not suffer from duplicate generation, something which is common for other models. Furthermore, it outperforms other CNN-RNN models, and we show that a standard architecture of an image encoder and language decoder trained with our proposed loss obtains the state-of-the-art results on the challenging MS-COCO, WIDER Attribute and PA-100K and competitive results on NUS-WIDE."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.6637555501963742E-4"/>
</attvalues>
<viz:size value="44.575268"/>
<viz:position x="9707.435" y="5909.3105"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_171" label="abstract_171">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Today, we witness the appearance of many lifelogging cameras that are able to capture the life of a person wearing the camera and which produce a large number of images everyday. Automatically characterizing the experience and extracting patterns of behavior of individuals from this huge collection of unlabeled and unstructured egocentric data present major challenges and require novel and efficient algorithmic solutions. The main goal of this work is to propose a new method to automatically assess day similarity from the lifelogging images of a person. We propose a technique to measure the similarity between images based on the Swains distance and generalize it to detect the similarity between daily visual data. To this purpose, we apply the dynamic time warping (DTW) combined with the Swains distance for final day similarity estimation. For validation, we apply our technique on the Egocentric Dataset of University of Barcelona (EDUB) of 4912 daily images acquired by four persons with preliminary encouraging results."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="2.159029674470621E-4"/>
</attvalues>
<viz:size value="46.887245"/>
<viz:position x="265.79987" y="8034.9854"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_172" label="abstract_172">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="One of the main characteristics of agricultural fields is that the appearance of different crops and their growth status, in an aerial image, is varied, and has a wide range of radiometric values and high level of variability. The extraction of these fields and their monitoring are activities that require a high level of human intervention. In this article, we propose a novel automatic algorithm, named deep network energy-minimization (DeepNEM), to extract agricultural fields in aerial images. The model-guided process selects the most relevant image clues extracted by a deep network, completes them and finally generates regions that represent the agricultural fields under a minimization scheme. DeepNEM has been tested over a broad range of fields in terms of size, shape, and content. Different measures were used to compare the DeepNEM with other methods, and to prove that it represents an improved approach to achieve a high-quality segmentation of agricultural fields. Furthermore, this article also presents a new public dataset composed of 1200 images with their parcels boundaries annotations."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="4"/>
<attvalue for="pageranks" value="1.9439227599143196E-4"/>
</attvalues>
<viz:size value="45.88311"/>
<viz:position x="170.56221" y="7254.665"/>
<viz:color r="0" g="189" b="148"/>
</node>
<node id="abstract_173" label="abstract_173">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Hand sign language recognition from video is a challenging research area in computer vision, which performance is affected by hand occlusion, fast hand movement, illumination changes, or background complexity, just to mention a few. In recent years, deep learning approaches have achieved state-of-the-art results in the field, though previous challenges are not completely solved. In this work, we propose a novel deep learning-based pipeline architecture for efficient automatic hand sign language recognition using Single Shot Detector (SSD), 2D Convolutional Neural Network (2DCNN), 3D Convolutional Neural Network (3DCNN), and Long Short-Term Memory (LSTM) from RGB input videos. We use a CNN-based model which estimates the 3D hand keypoints from 2D input frames. After that, we connect these estimated keypoints to build the hand skeleton by using midpoint algorithm. In order to obtain a more discriminative representation of hands, we project 3D hand skeleton into three views surface images. We further employ the heatmap image of detected keypoints as input for refinement in a stacked fashion. We apply 3DCNNs on the stacked features of hand, including pixel level, multi-view hand skeleton, and heatmap features, to extract discriminant local spatio-temporal features from these stacked inputs. The outputs of the 3DCNNs are fused and fed to a LSTM to model long-term dynamics of hand sign gestures. Analyzing 2DCNN vs. 3DCNN using different number of stacked inputs into the network, we demonstrate that 3DCNN better capture spatio-temporal dynamics of hands. To the best of our knowledge, this is the first time that this multi-modal and multi-view set of hand skeleton features are applied for hand sign language recognition. Furthermore, we present a new large-scale hand sign language dataset, namely RKS-PERSIANSIGN, including 10000 RGB videos of 100 Persian sign words. Evaluation results of the proposed model on three datasets, NYU, First-Person, and RKS-PERSIANSIGN, indicate that our model outperforms state-of-the-art models in hand sign language recognition, hand pose estimation, and hand action recognition."/>
<attvalue for="2" value="15"/>
<attvalue for="degree" value="16"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2736566512903628"/>
<attvalue for="harmonicclosnesscentrality" value="0.27961275626423804"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="1.4811921711976517E-4"/>
</attvalues>
<viz:size value="43.72305"/>
<viz:position x="-7854.446" y="-8327.907"/>
<viz:color r="255" g="136" b="5"/>
</node>
<node id="abstract_174" label="abstract_174">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Face anti-spoofing is essential to prevent face recognition systems from a security breach. Much of the progresses have been made by the availability of face anti-spoofing benchmark datasets in recent years. However, existing face anti-spoofing benchmarks have limited number of subjects (170) and modalities (2), which hinder the further development of the academic community. To facilitate face anti-spoofing research, we introduce a large-scale multi-modal dataset, namely CASIA-SURF, which is the largest publicly available dataset for face anti-spoofing in terms of both subjects and modalities. Specifically, it consists of 1,000 subjects with 21,000 videos and each sample has 3 modalities ( i.e. , RGB, Depth and IR). We also provide comprehensive evaluation metrics, diverse evaluation protocols, training/validation/testing subsets and a measurement tool, developing a new benchmark for face anti-spoofing. Moreover, we present a novel multi-modal multi-scale fusion method as a strong baseline, which performs feature re-weighting to select the more informative channel features while suppressing the less useful ones for each modality across different scales. Extensive experiments have been conducted on the proposed dataset to verify its significance and generalization capability. The dataset is available at https://sites.google.com/qq.com/face-anti-spoofing/welcome/challengecvpr2019?authuser=0"/>
<attvalue for="2" value="14"/>
<attvalue for="degree" value="15"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2731798382078407"/>
<attvalue for="harmonicclosnesscentrality" value="0.27881548974943166"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.4820732262586436E-4"/>
</attvalues>
<viz:size value="43.72716"/>
<viz:position x="-17751.37" y="1434.311"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_175" label="abstract_175">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="First impressions strongly influence social interactions, having a high impact in the personal and professional life. In this paper, we present a deep Classification-Regression Network (CR-Net) for analyzing the Big Five personality problem and further assisting on job interview recommendation in a first impressions setup. The setup is based on the ChaLearn First Impressions dataset, including multimodal data with video, audio, and text converted from the corresponding audio data, where each person is talking in front of a camera. In order to give a comprehensive prediction, we analyze the videos from both the entire scene (including the persons motions and background) and the face of the person. Our CR-Net first performs personality trait classification and applies a regression later, which can obtain accurate predictions for both personality traits and interview recommendation. Furthermore, we present a new loss function called Bell Loss to address inaccurate predictions caused by the regression-to-the-mean problem. Extensive experiments on the First Impressions dataset show the effectiveness of our proposed network, outperforming the state-of-the-art."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.580402143065761E-4"/>
</attvalues>
<viz:size value="44.18617"/>
<viz:position x="-16786.912" y="303.94507"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_176" label="abstract_176">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Social media, as a major platform for communication and information exchange, is a rich repository of the opinions and sentiments of 2.3 billion users about a vast spectrum of topics. In this sense, user text interactions are widely used to sense the whys of certain social users demands and cultural- driven interests. However, the knowledge embedded in the 1.8 billion pictures which are uploaded daily in public profiles has just started to be exploited. Following this trend on visual-based social analysis, we present a novel methodology based on neural networks to build a combined image-and-text based personality trait model, trained with images posted together with words found highly correlated to specific personality traits. So, the key contribution in this work is to explore whether OCEAN personality trait modeling can be addressed based on images, here called MindPics, appearing with certain tags with psychological insights. We found that there is a correlation between posted images and the personality estimated from their accompanying texts. Thus, the experimental results are consistent with previous cyber-psychology results based on texts, suggesting that images could also be used for personality estimation: classification results on some personality traits show that specific and characteristic visual patterns emerge, in essence representing abstract concepts. These results open new avenues of research for further refining the proposed personality model under the supervision of psychology experts, and to further substitute current textual personality questionnaires by image-based ones."/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="1.6029178034873755E-4"/>
</attvalues>
<viz:size value="44.291275"/>
<viz:position x="-3295.0762" y="-3551.2126"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_177" label="abstract_177">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In handball videos recorded during the training, multiple players are present in the scene at the same time. Although they all might move and interact, not all players contribute to the currently relevant exercise nor practice the given handball techniques. The goal of this experiment is to automatically determine players on training footage that perform given handball techniques and are therefore considered active. It is a very challenging task for which a precise object detector is needed that can handle cluttered scenes with poor illumination, with many players present in different sizes and distances from the camera, partially occluded, moving fast. To determine which of the detected players are active, additional information is needed about the level of player activity. Since many handball actions are characterized by considerable changes in speed, position, and variations in the player's appearance, we propose using spatio-temporal interest points (STIPs) and optical flow (OF). Therefore, we propose an active player detection method combining the YOLO object detector and two activity measures based on STIPs and OF. The performance of the proposed method and activity measures are evaluated on a custom handball video dataset acquired during handball training lessons."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2727724617870014"/>
<attvalue for="harmonicclosnesscentrality" value="0.2781321184510262"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="2.0862195271822878E-4"/>
</attvalues>
<viz:size value="46.54736"/>
<viz:position x="-2350.7297" y="-2091.1497"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_178" label="abstract_178">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="It is commonly known the natural tendency of artificial neural networks to completely and abruptly forget previously known information when learning new information. We explore this behaviour in the context of Face Verification on the recently proposed Disguised Faces in the Wild dataset (DFW). We empirically evaluate several commonly used DCNN architectures on Face Recognition and distill some insights about the effect of sequential learning on distinct identities from different datasets, showing that the catastrophic forgetness phenomenon is present even in feature embeddings fine-tuned on different tasks from the original domain."/>
<attvalue for="2" value="4"/>
<attvalue for="degree" value="5"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27338398306140244"/>
<attvalue for="harmonicclosnesscentrality" value="0.2791571753986344"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="1.4702381550528437E-4"/>
</attvalues>
<viz:size value="43.671917"/>
<viz:position x="-3612.142" y="-4957.112"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_179" label="abstract_179">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Fine-grained image recognition is central to many multimedia tasks such as search, retrieval, and captioning. Unfortunately, these tasks are still challenging since the appearance of samples of the same class can be more different than those from different classes. This issue is mainly due to changes in deformation, pose, and the presence of clutter. In the literature, attention has been one of the most successful strategies to handle the aforementioned problems. Attention has been typically implemented in neural networks by selecting the most informative regions of the image that improve classification. In contrast, in this paper, attention is not applied at the image level but to the convolutional feature activations. In essence, with our approach, the neural model learns to attend to lower-level feature activations without requiring part annotations and uses those activations to update and rectify the output likelihood distribution. The proposed mechanism is modular, architecture-independent, and efficient in terms of both parameters and computation required. Experiments demonstrate that well-known networks such as wide residual networks and ResNeXt, when augmented with our approach, systematically improve their classification accuracy and become more robust to changes in deformation and pose and to the presence of clutter. As a result, our proposal reaches state-of-the-art classification accuracies in CIFAR-10, the Adience gender recognition task, Stanford Dogs, and UEC-Food100 while obtaining competitive performance in ImageNet, CIFAR-100, CUB200 Birds, and Stanford Cars. In addition, we analyze the different components of our model, showing that the proposed attention modules succeed in finding the most discriminative regions of the image. Finally, as a proof of concept, we demonstrate that with only local predictions, an augmented neural network can successfully classify an image before reaching any fully connected layer, thus reducing the computational amount up to 10%."/>
<attvalue for="2" value="13"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="1.5583504686172498E-4"/>
</attvalues>
<viz:size value="44.08323"/>
<viz:position x="-3497.5146" y="-3782.4792"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_180" label="abstract_180">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Humans are capable of learning new tasks without forgetting previous ones, while neural networks fail due to catastrophic forgetting between new and previously-learned tasks. We consider a class-incremental setting which means that the task-ID is unknown at inference time. The imbalance between old and new classes typically results in a bias of the network towards the newest ones. This imbalance problem can either be addressed by storing exemplars from previous tasks, or by using image replay methods. However, the latter can only be applied to toy datasets since image generation for complex datasets is a hard problem.We propose a solution to the imbalance problem based on generative feature replay which does not require any exemplars. To do this, we split the network into two parts: a feature extractor and a classifier. To prevent forgetting, we combine generative feature replay in the classifier with feature distillation in the feature extractor. Through feature generation, our method reduces the complexity of generative replay and prevents the imbalance problem. Our approach is computationally efficient and scalable to large datasets. Experiments confirm that our approach achieves state-of-the-art results on CIFAR-100 and ImageNet, while requiring only a fraction of the storage needed for exemplar-based continual learning"/>
<attvalue for="2" value="11"/>
<attvalue for="degree" value="12"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27324785260799206"/>
<attvalue for="harmonicclosnesscentrality" value="0.2789293849658326"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.5504495458160768E-4"/>
</attvalues>
<viz:size value="44.04635"/>
<viz:position x="9022.911" y="7994.1665"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_181" label="abstract_181">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="People from different parts of the globe describe objects and concepts in distinct manners. Visual appearance can thus vary across different geographic locations, which makes location a relevant contextual information when analysing visual data. In this work, we address the task of image retrieval related to a given tag conditioned on a certain location on Earth. We present LocSens, a model that learns to rank triplets of images, tags and coordinates by plausibility, and two training strategies to balance the location influence in the final ranking. LocSens learns to fuse textual and location information of multimodal queries to retrieve related images at different levels of location granularity, and successfully utilizes location information to improve image tagging."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="15"/>
<attvalue for="pageranks" value="1.5857754462995634E-4"/>
</attvalues>
<viz:size value="44.21125"/>
<viz:position x="-4812.9673" y="5653.9263"/>
<viz:color r="76" g="70" b="62"/>
</node>
<node id="abstract_182" label="abstract_182">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="One of the attractive characteristics of deep neural networks is their ability to transfer knowledge obtained in one domain to other related domains. As a result, high-quality networks can be trained in domains with relatively little training data. This property has been extensively studied for discriminative networks but has received significantly less attention for generative models. Given the often enormous effort required to train GANs, both computationally as well as in the dataset collection, the re-use of pretrained GANs is a desirable objective. We propose a novel knowledge transfer method for generative models based on mining the knowledge that is most beneficial to a specific target domain, either from a single or multiple pretrained GANs. This is done using a miner network that identifies which part of the generative distribution of each pretrained GAN outputs samples closest to the target domain. Mining effectively steers GAN sampling towards suitable regions of the latent space, which facilitates the posterior finetuning and avoids pathologies of other methods such as mode collapse and lack of flexibility. We perform experiments on several complex datasets using various GAN architectures (BigGAN, Progressive GAN) and show that the proposed method, called MineGAN, effectively transfers knowledge to domains with few target images, outperforming existing methods. In addition, MineGAN can successfully transfer knowledge from multiple pretrained GANs."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.5566897203874483E-4"/>
</attvalues>
<viz:size value="44.075478"/>
<viz:position x="8840.895" y="8794.49"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_183" label="abstract_183">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Class-incremental learning of deep networks sequentially increases the number of classes to be classified. During training, the network has only access to data of one task at a time, where each task contains several classes. In this setting, networks suffer from catastrophic forgetting which refers to the drastic drop in performance on previous tasks. The vast majority of methods have studied this scenario for classification networks, where for each new task the classification layer of the network must be augmented with additional weights to make room for the newly added classes. Embedding networks have the advantage that new classes can be naturally included into the network without adding new weights. Therefore, we study incremental learning for embedding networks. In addition, we propose a new method to estimate the drift, called semantic drift, of features and compensate for it without the need of any exemplars. We approximate the drift of previous tasks based on the drift that is experienced by current task data. We perform experiments on fine-grained datasets, CIFAR100 and ImageNet-Subset. We demonstrate that embedding networks suffer significantly less from catastrophic forgetting. We outperform existing methods which do not require exemplars and obtain competitive results compared to methods which store exemplars. Furthermore, we show that our proposed SDC when combined with existing methods to prevent forgetting consistently improves results."/>
<attvalue for="2" value="13"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2731798382078407"/>
<attvalue for="harmonicclosnesscentrality" value="0.27881548974943166"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.5472005408437306E-4"/>
</attvalues>
<viz:size value="44.03118"/>
<viz:position x="9203.989" y="6760.566"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_184" label="abstract_184">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="In recent years, convolutional neural networks (CNNs) have achieved impressive performance for various visual recognition scenarios. CNNs trained on large labeled datasets can not only obtain significant performance on most challenging benchmarks but also provide powerful representations, which can be used to a wide range of other tasks. However, the requirement of massive amounts of data to train deep neural networks is a major drawback of these models, as the data available is usually limited or imbalanced. Fine-tuning (FT) is an effective way to transfer knowledge learned in a source dataset to a target task. In this paper, we introduce and systematically investigate several factors that influence the performance of fine-tuning for visual recognition. These factors include parameters for the retraining procedure (e.g., the initial learning rate of fine-tuning), the distribution of the source and target data (e.g., the number of categories in the source dataset, the distance between the source and target datasets) and so on. We quantitatively and qualitatively analyze these factors, evaluate their influence, and present many empirical observations. The results reveal insights into what fine-tuning changes CNN parameters and provide useful and evidence-backed intuitions about how to implement fine-tuning for computer vision tasks."/>
<attvalue for="2" value="13"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2728402734617775"/>
<attvalue for="harmonicclosnesscentrality" value="0.27824601366742713"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="0"/>
<attvalue for="pageranks" value="1.8908496159140156E-4"/>
</attvalues>
<viz:size value="45.63536"/>
<viz:position x="8399.699" y="9678.564"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_185" label="abstract_185">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper addresses the problem of inferring unseen cross-modal image-to-image translations between multiple modalities. We assume that only some of the pairwise translations have been seen (i.e. trained) and infer the remaining unseen translations (where training pairs are not available). We propose mix and match networks, an approach where multiple encoders and decoders are aligned in such a way that the desired translation can be obtained by simply cascading the source encoder and the target decoder, even when they have not interacted during the training stage (i.e. unseen). The main challenge lies in the alignment of the latent representations at the bottlenecks of encoder-decoder pairs. We propose an architecture with several tools to encourage alignment, including autoencoders and robust side information and latent consistency losses. We show the benefits of our approach in terms of effectiveness and scalability compared with other pairwise image-to-image translation approaches. We also propose zero-pair cross-modal image translation, a challenging setting where the objective is inferring semantic segmentation from depth (and vice-versa) without explicit segmentation-depth pairs, and only from two (disjoint) segmentation-RGB and depth-RGB training sets. We observe that a certain part of the shared information between unseen modalities might not be reachable, so we further propose a variant that leverages pseudo-pairs which allows us to exploit this shared information between the unseen modalities"/>
<attvalue for="2" value="12"/>
<attvalue for="degree" value="13"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2729081188611215"/>
<attvalue for="harmonicclosnesscentrality" value="0.278359908883828"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.679113157128257E-4"/>
</attvalues>
<viz:size value="44.646957"/>
<viz:position x="8406.021" y="8330.382"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_186" label="abstract_186">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="We present the results of recent challenges in Automated Computer Vision (AutoCV, renamed here for clarity AutoCV1 and AutoCV2, 2019), which are part of a series of challenge on Automated Deep Learning (AutoDL). These two competitions aim at searching for fully automated solutions for classification tasks in computer vision, with an emphasis on any-time performance. The first competition was limited to image classification while the second one included both images and videos. Our design imposed to the participants to submit their code on a challenge platform for blind testing on five datasets, both for training and testing, without any human intervention whatsoever. Winning solutions adopted deep learning techniques based on already published architectures, such as AutoAugment, MobileNet and ResNet, to reach state-of-the-art performance in the time budget of the challenge (only 20 minutes of GPU time). The novel contributions include strategies to deliver good preliminary results at any time during the learning process, such that a method can be stopped early and still deliver good performance. This feature is key for the adoption of such techniques by data analysts desiring to obtain rapidly preliminary results on large datasets and to speed up the development process. The soundness of our design was verified in several aspects: (1) Little overfitting of the on-line leaderboard providing feedback on 5 development datasets was observed, compared to the final blind testing on the 5 (separate) final test datasets, suggesting that winning solutions might generalize to other computer vision classification tasks; (2) Error bars on the winners performance allow us to say with confident that they performed significantly better than the baseline solutions we provided; (3) The ranking of participants according to the any-time metric we designed, namely the Area under the Learning Curve, was different from that of the fixed-time metric, i.e. AUC at the end of the fixed time budget. We released all winning solutions under open-source licenses. At the end of the AutoDL challenge series, all data of the challenge will be made publicly available, thus providing a collection of uniformly formatted datasets, which can serve to conduct further research, particularly on meta-learning."/>
<attvalue for="2" value="13"/>
<attvalue for="degree" value="14"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27358843325439364"/>
<attvalue for="harmonicclosnesscentrality" value="0.2794988610478371"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="9"/>
<attvalue for="pageranks" value="1.38270972199639E-4"/>
</attvalues>
<viz:size value="43.26333"/>
<viz:position x="-15821.892" y="-3218.3298"/>
<viz:color r="115" g="192" b="0"/>
</node>
<node id="abstract_187" label="abstract_187">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Despite the latest transcription accuracies reached using deep neural network architectures, handwritten text recognition still remains a challenging problem, mainly because of the large inter-writer style variability. Both augmenting the training set with artificial samples using synthetic fonts, and writer adaptation techniques have been proposed to yield more generic approaches aimed at dodging style unevenness. In this work, we take a step closer to learn style independent features from handwritten word images. We propose a novel method that is able to disentangle the content and style aspects of input images by jointly optimizing a generative process and a handwrittenword recognizer. The generator is aimed at transferring writing style features from one sample to another in an image-to-image translation approach, thus leading to a learned content-centric features that shall be independent to writing style attributes.Our proposed recognition model is able then to leverage such writer-agnostic features to reach better recognition performances. We advance over prior training strategies and demonstrate with qualitative and quantitative evaluations the performance of boththe generative process and the recognition efficiency in the IAM dataset."/>
<attvalue for="2" value="8"/>
<attvalue for="degree" value="9"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="6"/>
<attvalue for="pageranks" value="1.5393567255707178E-4"/>
</attvalues>
<viz:size value="43.994568"/>
<viz:position x="986.5398" y="-733.4204"/>
<viz:color r="0" g="196" b="255"/>
</node>
<node id="abstract_188" label="abstract_188">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Although current image generation methods have reached impressive quality levels, they are still unable to produce plausible yet diverse images of handwritten words. On the contrary, when writing by hand, a great variability is observed across different writers, and even when analyzing words scribbled by the same individual, involuntary variations are conspicuous. In this work, we take a step closer to producing realistic and varied artificially rendered handwritten words. We propose a novel method that is able to produce credible handwritten word images by conditioning the generative process with both calligraphic style features and textual content. Our generator is guided by three complementary learning objectives: to produce realistic images, to imitate a certain handwriting style and to convey a specific textual content. Our model is unconstrained to any predefined vocabulary, being able to render whatever input word. Given a sample writer, it is also able to mimic its calligraphic features in a few-shot setup. We significantly advance over prior art and demonstrate with qualitative, quantitative and human-based evaluations the realistic aspect of our synthetically produced images."/>
<attvalue for="2" value="9"/>
<attvalue for="degree" value="10"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27311185765833024"/>
<attvalue for="harmonicclosnesscentrality" value="0.2787015945330307"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="14"/>
<attvalue for="pageranks" value="1.5105796082724246E-4"/>
</attvalues>
<viz:size value="43.860233"/>
<viz:position x="1467.8857" y="-572.2706"/>
<viz:color r="223" g="137" b="255"/>
</node>
<node id="abstract_189" label="abstract_189">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="Current CNN-based stereo depth estimation models can barely run under real-time constraints on embedded graphic processing unit (GPU) devices. Moreover, state-of-the-art evaluations usually do not consider model optimization techniques, being that it is unknown what is the current potential on embedded GPU devices. In this work, we evaluate two state-of-the-art models on three different embedded GPU devices, with and without optimization methods, presenting performance results that illustrate the actual capabilities of embedded GPU devices for stereo depth estimation. More importantly, based on our evaluation, we propose the use of a U-Net like architecture for postprocessing the cost-volume, instead of a typical sequence of 3D convolutions, drastically augmenting the runtime speed of current models. In our experiments, we achieve real-time inference speed, in the range of 532 ms, for 1216 368 input stereo images on the Jetson TX2, Jetson Xavier, and Jetson Nano embedded devices."/>
<attvalue for="2" value="6"/>
<attvalue for="degree" value="7"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.2730439109341958"/>
<attvalue for="harmonicclosnesscentrality" value="0.27858769931662986"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="5"/>
<attvalue for="pageranks" value="1.6016925622777664E-4"/>
</attvalues>
<viz:size value="44.285553"/>
<viz:position x="-50.39932" y="-5503.5093"/>
<viz:color r="192" g="192" b="192"/>
</node>
<node id="abstract_190" label="abstract_190">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper presents the design and implementation details of a system build-up by using off-the-shelf algorithms for urban video analytics. The system allows the connection topublic video surveillance camera networks to obtain the necessary information to generate statistics from urban scenarios (e.g., amount of vehicles, type of cars, direction, numbers of persons, etc.). The obtained information could be used not only for traffic management but also to estimate the carbon footprint of urban scenarios. As a case study, a university campus is selected to evaluate the performance of the proposed system. The system is implemented in a modular way so that it is being used as a testbed to evaluate different algorithms. Implementation results are provided showing the validity and utility of the proposed approach."/>
<attvalue for="2" value="10"/>
<attvalue for="degree" value="11"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27324785260799206"/>
<attvalue for="harmonicclosnesscentrality" value="0.2789293849658326"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="13"/>
<attvalue for="pageranks" value="1.5252808547574413E-4"/>
</attvalues>
<viz:size value="43.92886"/>
<viz:position x="-2649.109" y="-12777.321"/>
<viz:color r="255" g="85" b="132"/>
</node>
<node id="abstract_191" label="abstract_191">
<attvalues>
<attvalue for="0" value="abstract"/>
<attvalue for="1" value="This paper presents a full pipeline to classify sample sets of corn kernels. The proposed approach follows a segmentation-classification scheme. The image segmentation is performed through a well known deep learningbased approach, the Mask R-CNN architecture, while the classification is performed hrough a novel-lightweight network specially designed for this taskgood corn kernel, defective corn kernel and impurity categories are considered. As a second contribution, a carefully annotated multitouching corn kernel dataset has been generated. This dataset has been used for training the segmentation and the classification modules. Quantitative evaluations have beenperformed and comparisons with other approaches are provided showing improvements with the proposed pipeline."/>
<attvalue for="2" value="7"/>
<attvalue for="degree" value="8"/>
<attvalue for="eccentricity" value="4.0"/>
<attvalue for="closnesscentrality" value="0.27297599801019773"/>
<attvalue for="harmonicclosnesscentrality" value="0.2784738041002289"/>
<attvalue for="betweenesscentrality" value="0.0"/>
<attvalue for="modularity_class" value="7"/>
<attvalue for="pageranks" value="1.678206721534514E-4"/>
</attvalues>
<viz:size value="44.642727"/>
<viz:position x="-2793.