Implementing Asimov's First Law of Robotics
@article{NIK,
	abstract = {The need to ensure that autonomous systems behave ethically is increasing as these systems become part of our society. Although there is no consensus on which actions an autonomous system should always be ethically obliged to take, preventing harm to people is an intuitive first candidate for a principle of behaviour. Do not hurt a human, or allow a human to be hurt through your inaction, is Asimov's First Law of robotics. We consider the challenges that implementing this Law incurs. To unearth these challenges, we constructed a simulation of a First Law-abiding agent and an accident-prone human. We used a classic two-dimensional grid environment and explored to what extent an agent can be programmed, using standard artificial intelligence methods, to prevent a human from taking dangerous actions. We outline the drawbacks of using Asimov's First Law of robotics as the underlying ethical theory that governs an autonomous system's behaviour.},
	author = {Mateo Alvarez and {\O}yvind Berge and Audun Berget and Eirin Bj{\o}rknes and Dag V. K. Johnsen and Fredrik Madsen and Marija Slavkovik},
	issn = {1892-0721},
	journal = {Norsk Informatikkonferanse},
	title = {Implementing {Asimov's} First Law of Robotics},
	url = {https://ojs.bibsys.no/index.php/NIK/article/view/396},
	year = {2017}}
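
To make the setup concrete, here is a minimal, hypothetical Python sketch of the kind of simulation the abstract describes: an accident-prone human wanders a two-dimensional grid containing hazard cells, and a robot agent following Asimov's First Law vetoes any move that would bring the human to harm. The grid size, hazard positions, and names (accident_prone_human, first_law_robot, simulate) are illustrative assumptions, not the authors' actual implementation.

"""A hedged sketch of a First-Law-abiding agent in a 2D grid world.
All parameters and rules below are assumptions for illustration only."""

import random

GRID_SIZE = 5
HAZARDS = {(1, 3), (3, 1), (4, 4)}          # assumed "dangerous" cells
MOVES = [(0, 1), (0, -1), (1, 0), (-1, 0)]  # up, down, right, left


def in_bounds(pos):
    """Return True if a position lies inside the grid."""
    return 0 <= pos[0] < GRID_SIZE and 0 <= pos[1] < GRID_SIZE


def accident_prone_human(pos):
    """The human picks a random legal move, ignoring hazards entirely."""
    options = [(pos[0] + dx, pos[1] + dy) for dx, dy in MOVES]
    return random.choice([p for p in options if in_bounds(p)])


def first_law_robot(human_pos, proposed):
    """Veto any human move that would land on a hazard cell.

    "Through inaction, allow a human to come to harm" is modelled here as an
    obligation to intervene, not merely to refrain from causing harm.
    """
    if proposed in HAZARDS:
        return human_pos, True    # block the move: the human stays put
    return proposed, False        # safe move: no intervention


def simulate(steps=20, seed=0):
    """Run the human/robot interaction for a fixed number of steps."""
    random.seed(seed)
    human = (0, 0)
    for t in range(steps):
        proposed = accident_prone_human(human)
        human, intervened = first_law_robot(human, proposed)
        note = "ROBOT INTERVENED" if intervened else ""
        print(f"t={t:2d} human at {human} {note}")


if __name__ == "__main__":
    simulate()

Even in this toy version, the design choice is visible: the robot needs a model of which states count as harm and the authority to override the human's action, which is where the drawbacks discussed in the paper begin to surface.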