@article{10.3389/frobt.2020.00109,
  author   = {Su, Ke and Su, Hang and Li, Jianguo and Zhu, Jun},
  title    = {Toward Accurate Visual Reasoning With {Dual-Path Neural Module Networks}},
  journal  = {Frontiers in Robotics and AI},
  volume   = {7},
  year     = {2020},
  issn     = {2296-9144},
  doi      = {10.3389/frobt.2020.00109},
  url      = {https://www.frontiersin.org/articles/10.3389/frobt.2020.00109},
  abstract = {Visual reasoning is a critical stage in visual question answering (Antol et al., 2015), but most of the state-of-the-art methods categorized the VQA tasks as a classification problem without taking the reasoning process into account. Various approaches are proposed to solve this multi-modal task that requires both abilities of comprehension and reasoning. The recently proposed neural module network (Andreas et al., 2016b), which assembles the model with a few primitive modules, is capable of performing a spatial or arithmetical reasoning over the input image to answer the questions. Nevertheless, its performance is not satisfying especially in the real-world datasets (e.g., VQA 1.0 \& 2.0) due to its limited primitive modules and suboptimal layout. To address these issues, we propose a novel method of Dual-Path Neural Module Network which can implement complex visual reasoning by forming a more flexible layout regularized by the pairwise loss. Specifically, we first use the region proposal network to generate both visual and spatial information, which helps it perform spatial reasoning. Then, we advocate to process a pair of different images along with the same question simultaneously, named as a ``complementary pair,'' which encourages the model to learn a more reasonable layout by suppressing the overfitting to the language priors. The model can jointly learn the parameters in the primitive module and the layout generation policy, which is further boosted by introducing a novel pairwise reward. Extensive experiments show that our approach significantly improves the performance of neural module networks especially on the real-world datasets.},
}