Aggregation of probabilistic logically related judgments (bibtex)
@article{NIK2018,
	abstract   = {The need to make sure autonomous systems behave ethically is increasing with these systems becoming part of our society. Although there is no consensus to which actions an autonomous system should always be ethically obliged, preventing harm to people is an intuitive first candidate for a principle of behavior. Do not hurt a human or allow a human to be hurt by your inaction is Asimov's First Law of robotics. We consider the challenges that the implementation of this Law will incur. To unearth these challenges we constructed a simulation of a First Robot Law abiding agent and an accident prone Human. We used a classic two-dimensional grid environment and explored to which extent an agent can be programmed, using standard artificial intelligence methods, to prevent a human from making dangerous actions. We outline the drawbacks of using the Asimov's First Law of robotics as an underlying ethical theory the governs an autonomous system's behaviour.},
	author     = {Ivanovska, M. and Slavkovik, M.},
	issn       = {1892-0721},
	journal    = {Norsk Informatikkonferanse},
	title      = {Aggregation of Probabilistic Logically Related Judgments},
	url        = {https://ojs.bibsys.no/index.php/NIK/article/view/396},
	year       = {2018},
	bdsk-url-1 = {https://ojs.bibsys.no/index.php/NIK/article/view/396},
}
Powered by bibtexbrowser