<html xmlns:v="urn:schemas-microsoft-com:vml" xmlns:o="urn:schemas-microsoft-com:office:office" xmlns:w="urn:schemas-microsoft-com:office:word" xmlns:m="http://schemas.microsoft.com/office/2004/12/omml" xmlns="http://www.w3.org/TR/REC-html40"><head><meta http-equiv=Content-Type content="text/html; charset=utf-8"><meta name=Generator content="Microsoft Word 15 (filtered medium)"><!--[if !mso]><style>v\:* {behavior:url(#default#VML);}
o\:* {behavior:url(#default#VML);}
w\:* {behavior:url(#default#VML);}
.shape {behavior:url(#default#VML);}
</style><![endif]--><style><!--
/* Font Definitions */
@font-face
        {font-family:"Cambria Math";
        panose-1:2 4 5 3 5 4 6 3 2 4;}
@font-face
        {font-family:Calibri;
        panose-1:2 15 5 2 2 2 4 3 2 4;}
@font-face
        {font-family:"Calibri Light";
        panose-1:2 15 3 2 2 2 4 3 2 4;}
@font-face
        {font-family:Roboto;}
/* Style Definitions */
p.MsoNormal, li.MsoNormal, div.MsoNormal
        {margin:0in;
        font-size:11.0pt;
        font-family:"Calibri",sans-serif;}
h4
        {mso-style-priority:9;
        mso-style-link:"Επικεφαλίδα 4 Char";
        margin-top:14.0pt;
        margin-right:0in;
        margin-bottom:4.0pt;
        margin-left:0in;
        line-height:115%;
        page-break-after:avoid;
        font-size:12.0pt;
        font-family:"Arial",sans-serif;
        color:#666666;
        mso-fareast-language:ZH-CN;
        font-weight:normal;}
a:link, span.MsoHyperlink
        {mso-style-priority:99;
        color:blue;
        text-decoration:underline;}
span.4Char
        {mso-style-name:"Επικεφαλίδα 4 Char";
        mso-style-priority:9;
        mso-style-link:"Επικεφαλίδα 4";
        font-family:"Calibri Light",sans-serif;
        color:#2F5496;
        font-style:italic;}
.MsoChpDefault
        {mso-style-type:export-only;
        font-size:10.0pt;}
@page WordSection1
        {size:8.5in 11.0in;
        margin:1.0in 1.25in 1.0in 1.25in;}
div.WordSection1
        {page:WordSection1;}
/* List Definitions */
@list l0
        {mso-list-id:619065925;
        mso-list-template-ids:1674757832;}
@list l0:level1
        {mso-level-number-format:bullet;
        mso-level-text:●;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l0:level2
        {mso-level-number-format:bullet;
        mso-level-text:○;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l0:level3
        {mso-level-number-format:bullet;
        mso-level-text:■;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l0:level4
        {mso-level-number-format:bullet;
        mso-level-text:●;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l0:level5
        {mso-level-number-format:bullet;
        mso-level-text:○;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l0:level6
        {mso-level-number-format:bullet;
        mso-level-text:■;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l0:level7
        {mso-level-number-format:bullet;
        mso-level-text:●;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l0:level8
        {mso-level-number-format:bullet;
        mso-level-text:○;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l0:level9
        {mso-level-number-format:bullet;
        mso-level-text:■;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l1
        {mso-list-id:1294629649;
        mso-list-template-ids:557997824;}
@list l1:level1
        {mso-level-number-format:bullet;
        mso-level-text:;
        mso-level-tab-stop:.5in;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-ansi-font-size:10.0pt;
        font-family:Symbol;}
@list l1:level2
        {mso-level-number-format:bullet;
        mso-level-text:;
        mso-level-tab-stop:1.0in;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-ansi-font-size:10.0pt;
        font-family:Symbol;}
@list l1:level3
        {mso-level-number-format:bullet;
        mso-level-text:;
        mso-level-tab-stop:1.5in;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-ansi-font-size:10.0pt;
        font-family:Symbol;}
@list l1:level4
        {mso-level-number-format:bullet;
        mso-level-text:;
        mso-level-tab-stop:2.0in;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-ansi-font-size:10.0pt;
        font-family:Symbol;}
@list l1:level5
        {mso-level-number-format:bullet;
        mso-level-text:;
        mso-level-tab-stop:2.5in;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-ansi-font-size:10.0pt;
        font-family:Symbol;}
@list l1:level6
        {mso-level-number-format:bullet;
        mso-level-text:;
        mso-level-tab-stop:3.0in;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-ansi-font-size:10.0pt;
        font-family:Symbol;}
@list l1:level7
        {mso-level-number-format:bullet;
        mso-level-text:;
        mso-level-tab-stop:3.5in;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-ansi-font-size:10.0pt;
        font-family:Symbol;}
@list l1:level8
        {mso-level-number-format:bullet;
        mso-level-text:;
        mso-level-tab-stop:4.0in;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-ansi-font-size:10.0pt;
        font-family:Symbol;}
@list l1:level9
        {mso-level-number-format:bullet;
        mso-level-text:;
        mso-level-tab-stop:4.5in;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-ansi-font-size:10.0pt;
        font-family:Symbol;}
@list l2
        {mso-list-id:1617953237;
        mso-list-template-ids:-457161634;}
@list l2:level1
        {mso-level-number-format:bullet;
        mso-level-text:●;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l2:level2
        {mso-level-number-format:bullet;
        mso-level-text:○;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l2:level3
        {mso-level-number-format:bullet;
        mso-level-text:■;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l2:level4
        {mso-level-number-format:bullet;
        mso-level-text:●;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l2:level5
        {mso-level-number-format:bullet;
        mso-level-text:○;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l2:level6
        {mso-level-number-format:bullet;
        mso-level-text:■;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l2:level7
        {mso-level-number-format:bullet;
        mso-level-text:●;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l2:level8
        {mso-level-number-format:bullet;
        mso-level-text:○;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
@list l2:level9
        {mso-level-number-format:bullet;
        mso-level-text:■;
        mso-level-tab-stop:none;
        mso-level-number-position:left;
        text-indent:-.25in;
        mso-text-animation:none;
        text-decoration:none;
        text-underline:none;
        text-line-through:none;}
ol
        {margin-bottom:0in;}
ul
        {margin-bottom:0in;}
--></style><!--[if gte mso 9]><xml>
<o:shapedefaults v:ext="edit" spidmax="1026" />
</xml><![endif]--><!--[if gte mso 9]><xml>
<o:shapelayout v:ext="edit">
<o:idmap v:ext="edit" data="1" />
</o:shapelayout></xml><![endif]--></head><body lang=EN-US link=blue vlink=purple style='word-wrap:break-word'><div class=WordSection1><p class=MsoNormal><o:p> </o:p></p><p class=MsoNormal align=center style='text-align:center'><b><span lang=EN-US style='font-size:12.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Call for Papers: 1st Autonomous Vehicle Vision (AVVision’21) Workshop<o:p></o:p></span></b></p><p class=MsoNormal align=center style='text-align:center'><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>In conjunction with WACV 2021<o:p></o:p></span></b></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>The Autonomous Vehicle Vision 2021 (AVVision’21) workshop (webpage: avvision.xyz) aims to bring together industry professionals and academics to brainstorm and exchange ideas on the advancement of visual environment perception for autonomous driving. In this one-day workshop, we will have regular paper presentations and invited speakers presenting the state of the art as well as the challenges in autonomous driving. Furthermore, we have prepared several large-scale synthetic and real-world datasets, which have been annotated by the Hong Kong University of Science and Technology (HKUST), UDI, CalmCar, ATG Robotics, etc. Based on these datasets, three challenges will be hosted to assess the current status of computer vision and machine/deep learning algorithms in solving visual environment perception problems for autonomous driving: 1) the CalmCar MTMC Challenge, 2) the HKUST-UDI UDA Challenge, and 3) the KITTI Object Detection Challenge.<o:p></o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></p><p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Keynote Speakers:<o:p></o:p></span></b></p><p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></b></p><h4 style='mso-margin-top-alt:0in;margin-right:0in;margin-bottom:0in;margin-left:.5in;text-align:justify;text-indent:-.25in;page-break-after:auto;mso-list:l0 level1 lfo2'><a name="_hk8ropa1wdh0"></a><![if !supportLists]><span lang=EN style='font-size:10.0pt;line-height:115%;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><span style='mso-list:Ignore'>●<span style='font:7.0pt "Times New Roman"'>      </span></span></span><![endif]><span lang=EN style='font-size:10.0pt;line-height:115%;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Andreas Geiger, University of Tübingen<o:p></o:p></span></h4><h4 style='mso-margin-top-alt:0in;margin-right:0in;margin-bottom:0in;margin-left:.5in;text-align:justify;text-indent:-.25in;page-break-after:auto;mso-list:l0 level1 lfo2'><a name="_xhlahxbhw52t"></a><![if !supportLists]><span lang=EN
style='font-size:10.0pt;line-height:115%;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><span style='mso-list:Ignore'>●<span style='font:7.0pt "Times New Roman"'>      </span></span></span><![endif]><span lang=EN style='font-size:10.0pt;line-height:115%;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Ioannis Pitas, Aristotle University of Thessaloniki<o:p></o:p></span></h4><h4 style='mso-margin-top-alt:0in;margin-right:0in;margin-bottom:0in;margin-left:.5in;text-align:justify;text-indent:-.25in;page-break-after:auto;mso-list:l0 level1 lfo2'><a name="_6rjz521tx44r"></a><![if !supportLists]><span lang=EN style='font-size:10.0pt;line-height:115%;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><span style='mso-list:Ignore'>●<span style='font:7.0pt "Times New Roman"'>      </span></span></span><![endif]><span lang=EN style='font-size:10.0pt;line-height:115%;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Nemanja Djuric, Uber ATG<o:p></o:p></span></h4><h4 style='mso-margin-top-alt:0in;margin-right:0in;margin-bottom:0in;margin-left:.5in;text-align:justify;text-indent:-.25in;page-break-after:auto;mso-list:l0 level1 lfo2'><a name="_rph4c733b7sc"></a><![if !supportLists]><span lang=EN style='font-size:10.0pt;line-height:115%;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><span style='mso-list:Ignore'>●<span style='font:7.0pt "Times New Roman"'>      </span></span></span><![endif]><span lang=EN style='font-size:10.0pt;line-height:115%;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Walterio Mayol-Cuevas, University of Bristol &amp; Amazon<o:p></o:p></span></h4><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></p><p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Call for Papers:<o:p></o:p></span></b></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>With a number of breakthroughs in autonomous system technology over the past decade, the race to commercialize self-driving cars has become fiercer than ever. The integration of advanced sensing, computer vision, signal/image processing, and machine/deep learning into autonomous vehicles enables them to perceive the environment intelligently and navigate safely. Autonomous driving systems must ensure safe, reliable, and efficient automated mobility in complex, uncontrolled real-world environments. Applications range from automated transportation and farming to public safety and environmental exploration. Visual perception is a critical component of autonomous driving. Enabling technologies include: a) affordable sensors that can acquire useful data under varying environmental conditions, b) reliable simultaneous localization and mapping, c) machine learning that can effectively handle varying real-world conditions and unforeseen events, as well as “machine-learning friendly” signal processing to enable more effective classification and decision making, d) hardware and software co-design for efficient real-time performance, e) resilient and robust platforms that can withstand adversarial attacks and failures, and f) end-to-end system integration of sensing, computer vision, signal/image processing, and machine/deep learning.
The AVVision'21 workshop will cover all these topics. Research papers are solicited on, but not limited to, the following topics:<o:p></o:p></span></p><ul style='margin-top:0in' type=disc><li class=MsoNormal style='color:#505050;margin-right:.25in;mso-list:l2 level1 lfo5'><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;background:white;mso-highlight:white'>3D road/environment reconstruction and understanding;<o:p></o:p></span></li><li class=MsoNormal style='color:#505050;margin-right:.25in;mso-list:l2 level1 lfo5'><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;background:white;mso-highlight:white'>Mapping and localization for autonomous cars;<o:p></o:p></span></li><li class=MsoNormal style='color:#505050;margin-right:.25in;mso-list:l2 level1 lfo5'><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;background:white;mso-highlight:white'>Semantic/instance driving scene segmentation and semantic mapping;<o:p></o:p></span></li><li class=MsoNormal style='color:#505050;margin-right:.25in;mso-list:l2 level1 lfo5'><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;background:white;mso-highlight:white'>Self-supervised/unsupervised visual environment perception;<o:p></o:p></span></li><li class=MsoNormal style='color:#505050;margin-right:.25in;mso-list:l2 level1 lfo5'><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;background:white;mso-highlight:white'>Car/pedestrian/object/obstacle detection/tracking and 3D localization;<o:p></o:p></span></li><li class=MsoNormal style='color:#505050;margin-right:.25in;mso-list:l2 level1 lfo5'><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;background:white;mso-highlight:white'>Car/license plate/road sign detection and recognition;<o:p></o:p></span></li><li class=MsoNormal style='color:#505050;margin-right:.25in;mso-list:l2 level1 lfo5'><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;background:white;mso-highlight:white'>Driver status monitoring and human-car interfaces;<o:p></o:p></span></li><li class=MsoNormal style='color:#505050;margin-right:.25in;mso-list:l2 level1 lfo5'><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;background:white;mso-highlight:white'>Deep/machine learning and image analysis for car perception;<o:p></o:p></span></li><li class=MsoNormal style='color:#505050;margin-right:.25in;mso-list:l2 level1 lfo5'><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;background:white;mso-highlight:white'>Adversarial domain adaptation for autonomous driving;<o:p></o:p></span></li><li class=MsoNormal style='color:#505050;margin-right:.25in;mso-list:l2 level1 lfo5'><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;background:white;mso-highlight:white'>On-board embedded visual perception systems;<o:p></o:p></span></li><li class=MsoNormal style='color:#505050;margin-right:.25in;line-height:115%;mso-list:l2 level1 lfo5'><span lang=EN-US style='font-size:10.0pt;line-height:115%;font-family:Roboto;background:white;mso-highlight:white'>Bio-inspired vision sensing for car perception;<o:p></o:p></span></li><li class=MsoNormal style='color:#505050;margin-right:.25in;line-height:115%;mso-list:l2 level1 lfo5'><span lang=EN-US style='font-size:10.0pt;line-height:115%;font-family:Roboto;background:white;mso-highlight:white'>Real-time deep learning inference.<o:p></o:p></span></li></ul><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></p><p class=MsoNormal><b><span
lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Author Guidelines:<o:p></o:p></span></b></p><p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></b></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Authors are encouraged to submit high-quality, original research (i.e., work that has not been previously published or accepted for publication in substantially similar form in any peer-reviewed venue, including journals, conferences, or workshops).<o:p></o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>The paper template is identical to that of the WACV 2021 main conference. The author toolkit (LaTeX only) is available both on </span><span lang=EN-US><a href="https://www.overleaf.com/latex/templates/wacv-2021-author-kit-template/ndrtfkktpxjx"><span style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white;text-decoration:none'>Overleaf</span></a></span><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'> and on </span><span lang=EN-US><a href="https://github.com/wacv2021/WACV-2021-Author-Kit"><span style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white;text-decoration:none'>GitHub</span></a></span><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>. Submissions are handled through the CMT submission website: </span><span lang=EN-US><a href="https://cmt3.research.microsoft.com/AVV2021/"><span style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white;text-decoration:none'>https://cmt3.research.microsoft.com/AVV2021/</span></a></span><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>.<o:p></o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Papers presented at the WACV workshops will be published as part of the "WACV Workshops Proceedings" and should, therefore, follow the same presentation guidelines as the main conference.
Workshop papers will be included in IEEE Xplore, but will be indexed separately from the </span><span lang=EN-US><a href="http://wacv2021.thecvf.com/submission"><span style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white;text-decoration:none'>main conference</span></a></span><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'> papers.<o:p></o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>For questions/remarks regarding the submission, e-mail <a href="mailto:avv.workshop@gmail.com">avv.workshop@gmail.com</a>.<o:p></o:p></span></p><p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></b></p><p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Challenges:<o:p></o:p></span></b></p><p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></b></p><p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Challenge 1: CalmCar MTMC Challenge</span></b><b><span lang=EN-US style='font-size:24.0pt;font-family:"Times New Roman",serif;color:#0F0640;background:white;mso-highlight:white'><o:p></o:p></span></b></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Multi-target multi-camera (MTMC) tracking systems can automatically track multiple vehicles using an array of cameras. In this challenge, participants are required to design robust MTMC algorithms targeted at vehicles, such that the same vehicle captured by different cameras is assigned the same tracking ID. The competitors will have access to four large-scale training datasets, each of which includes around 1200 annotated RGB images, with labels covering vehicle types, tracking IDs, and 2D bounding boxes. Identification precision (IDP) and identification recall (IDR) will be used as metrics to evaluate the performance of the implemented algorithms. The competitors are required to submit their pretrained models as well as the corresponding Docker image files via the </span><span lang=EN-US><a href="https://cmt3.research.microsoft.com/AVV2021/"><span style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white;text-decoration:none'>CMT submission system</span></a></span><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'> for algorithm evaluation (in terms of both speed and accuracy). <b>The winner of the competition will receive a monetary prize (US$5000) and will give a keynote presentation at the workshop.<o:p></o:p></b></span></p>
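<p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>For concreteness, the sketch below shows how the identification metrics are commonly computed (following Ristani et al., 2016): predicted identities are matched one-to-one to ground-truth identities so as to maximize the number of correctly identified detections (IDTP), and then IDP = IDTP / (IDTP + IDFP) and IDR = IDTP / (IDTP + IDFN). The function name and inputs are illustrative assumptions, not the official evaluation kit.<o:p></o:p></span></p><pre style='font-size:9.0pt'>
# Minimal sketch of the IDP/IDR identification metrics (assumed conventions;
# the official challenge evaluation kit may differ in details).
import numpy as np
from scipy.optimize import linear_sum_assignment

def id_precision_recall(overlap, gt_sizes, pred_sizes):
    # overlap[g, p]: number of detections on which ground-truth track g and
    # predicted track p coincide; gt_sizes / pred_sizes: detections per track.
    rows, cols = linear_sum_assignment(-overlap)  # maximize total agreement
    idtp = overlap[rows, cols].sum()              # correctly identified detections
    idfp = pred_sizes.sum() - idtp                # remaining predicted detections
    idfn = gt_sizes.sum() - idtp                  # remaining ground-truth detections
    return idtp / (idtp + idfp), idtp / (idtp + idfn)  # (IDP, IDR)
</pre>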
<p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></b></p><p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Challenge 2: HKUST-UDI UDA Challenge</span></b><b><span lang=EN-US style='font-size:24.0pt;font-family:"Times New Roman",serif;color:#0F0640;background:white;mso-highlight:white'><o:p></o:p></span></b></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Deep neural networks excel at learning from large amounts of data, but they can be inefficient when it comes to generalizing and applying learned knowledge to new datasets or environments. In this competition, participants need to develop an unsupervised domain adaptation (UDA) framework that allows a model trained on a large synthetic dataset to generalize to real-world imagery. The tasks in this competition include: 1) UDA for monocular depth prediction and 2) UDA for semantic driving-scene segmentation. The competitors will have access to the Ready to Drive (R2D) dataset, a large-scale synthetic driving scene dataset collected under different weather/illumination conditions using the CARLA simulator. In addition, competitors will also have access to a small amount of real-world data. The mean absolute relative error (mAbsRel) and the mean intersection over union (mIoU) score will be used as metrics to evaluate the performance of UDA for monocular depth prediction and UDA for semantic driving-scene segmentation, respectively. The competitors will be required to submit their pretrained models and Docker image files via the </span><span lang=EN-US><a href="https://cmt3.research.microsoft.com/AVV2021/"><span style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white;text-decoration:none'>CMT submission system</span></a></span><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>.<b><o:p></o:p></b></span></p>
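<p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Under the usual definitions, the two metrics can be sketched as follows: mAbsRel averages |pred - gt| / gt over pixels with valid ground-truth depth, and mIoU averages per-class intersection over union across the classes present. This is an illustrative sketch under assumed conventions; the masking and averaging rules of the official evaluation kit may differ.<o:p></o:p></span></p><pre style='font-size:9.0pt'>
# Illustrative sketch of the two challenge metrics (assumed conventions).
import numpy as np

def mean_abs_rel(pred_depth, gt_depth):
    valid = gt_depth > 0                    # ignore pixels without ground truth
    diff = np.abs(pred_depth[valid] - gt_depth[valid])
    return float(np.mean(diff / gt_depth[valid]))

def mean_iou(pred_labels, gt_labels, num_classes):
    ious = []
    for c in range(num_classes):
        inter = np.logical_and(pred_labels == c, gt_labels == c).sum()
        union = np.logical_or(pred_labels == c, gt_labels == c).sum()
        if union:                           # skip classes absent from both maps
            ious.append(inter / union)
    return float(np.mean(ious))
</pre>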
<p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></b></p><p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Challenge 3: KITTI Object Detection Challenge</span></b><b><span lang=EN-US style='font-size:24.0pt;font-family:"Times New Roman",serif;color:#0F0640;background:white;mso-highlight:white'><o:p></o:p></span></b></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Researchers of top-ranked object detection algorithms submitted to the </span><span lang=EN-US><a href="http://www.cvlibs.net/datasets/kitti/eval_3dobject.php"><span style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white;text-decoration:none'>KITTI Object Detection Benchmarks</span></a></span><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'> will have the opportunity to present their work at AVVision'21, subject to space availability and approval by the workshop organizers. Note that only algorithms submitted before 12/20/2020 are eligible for presentation at AVVision'21.<b><o:p></o:p></b></span></p><p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></b></p><p class=MsoNormal><b><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Important Dates:<o:p></o:p></span></b></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Full Paper Submission: 11/02/2020<o:p></o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Notification of Acceptance: 11/23/2020<o:p></o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Camera-Ready Paper Due: 11/30/2020<o:p></o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>HKUST-UDI UDA Challenge abstract and code submission: 12/13/2020<o:p></o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Notification of HKUST-UDI UDA Challenge results: 12/20/2020<o:p></o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'><o:p> </o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>CalmCar MTMC Challenge abstract and code submission: 12/13/2020<o:p></o:p></span></p><p class=MsoNormal><span lang=EN-US style='font-size:10.0pt;font-family:Roboto;color:#505050;background:white;mso-highlight:white'>Notification of CalmCar MTMC Challenge results: 12/20/2020<o:p></o:p></span></p><p class=MsoNormal><span lang=EN-US><o:p> </o:p></span></p></div></body></html>