@comment{BibTeX entry for "Implementing Asimov's First Law of Robotics" (NIK 2017).}
@article{NIK,
	author   = {Alvarez, Mateo and
	            Berge, {\O}yvind and
	            Berget, Audun and
	            Bj{\o}rknes, Eirin and
	            Johnsen, Dag V. K. and
	            Madsen, Fredrik and
	            Slavkovik, Marija},
	title    = {Implementing {Asimov's} First Law of Robotics},
	journal  = {Norsk Informatikkonferanse},
	year     = {2017},
	issn     = {1892-0721},
	url      = {https://ojs.bibsys.no/index.php/NIK/article/view/396},
	abstract = {The need to make sure autonomous systems behave ethically is increasing with these systems becoming part of our society. Although there is no consensus to which actions an autonomous system should always be ethically obliged, preventing harm to people is an intuitive first candidate for a principle of behaviour. Do not hurt a human or allow a human to be hurt by your inaction is Asimov's First Law of robotics. We consider the challenges that the implementation of this Law will incur. To unearth these challenges we constructed a simulation of a First Robot Law abiding agent and an accident prone Human. We used a classic two-dimensional grid environment and explored to which extent an agent can be programmed, using standard artificial intelligence methods, to prevent a human from making dangerous actions. We outline the drawbacks of using the Asimov's First Law of robotics as an underlying ethical theory the governs an autonomous system's behaviour.},
}
@comment{Exported via bibtexbrowser.}