See prompt injection on Wiktionary
{ "forms": [ { "form": "prompt injections", "tags": [ "plural" ] } ], "head_templates": [ { "args": { "1": "~" }, "expansion": "prompt injection (countable and uncountable, plural prompt injections)", "name": "en-noun" } ], "lang": "English", "lang_code": "en", "pos": "noun", "senses": [ { "categories": [ { "kind": "other", "name": "English entries with incorrect language header", "parents": [], "source": "w" }, { "kind": "other", "name": "Pages with 1 entry", "parents": [], "source": "w" }, { "kind": "other", "name": "Pages with entries", "parents": [], "source": "w" }, { "kind": "other", "langcode": "en", "name": "Artificial intelligence", "orig": "en:Artificial intelligence", "parents": [], "source": "w" }, { "kind": "other", "langcode": "en", "name": "Computer security", "orig": "en:Computer security", "parents": [], "source": "w" } ], "examples": [ { "bold_text_offsets": [ [ 299, 315 ] ], "ref": "2022 September 21, Alex Hern, “TechScape: AI's dark arts come into their own”, in The Guardian, London: Guardian News & Media, →ISSN, →OCLC, archived from the original on 05 Feb 2023:", "text": "Retomeli.io is a jobs board for remote workers, and the website runs a Twitter bot that spammed people who tweeted about remote working. The Twitter bot is explicitly labelled as being \"OpenAI-driven\", and within days of Goodside's proof-of-concept being published, thousands of users were throwing prompt injection attacks at the bot.", "type": "quote" }, { "bold_text_offsets": [ [ 127, 144 ] ], "ref": "2023 March 3, Chloe Xiang, “Hackers Can Turn Bing's AI Chatbot Into a Convincing Scammer, Researchers Say”, in VICE, archived from the original on 22 Mar 2023:", "text": "Yesterday, OpenAI announced an API for ChatGPT and posted an underlying format for the bot on GitHub, alluding to the issue of prompt injections.", "type": "quote" }, { "bold_text_offsets": [ [ 35, 51 ] ], "ref": "2023 February 14, Will Oremus, “Meet ChatGPT's evil twin, DAN”, in The Washington Post, Washington, D.C.: The Washington Post Company, →ISSN, →OCLC, archived from the original on 19 Mar 2023:", "text": "One category is what's known as a \"prompt injection attack,\" in which users trick the software into revealing its hidden data or instructions.", "type": "quote" }, { "bold_text_offsets": [ [ 211, 227 ] ], "ref": "2025 September 25, “How to stop AI’s “lethal trifecta””, in The Economist, →ISSN:", "text": "Large language models (LLMs), a trendy way of building artificial intelligence, have an inherent security problem: they cannot separate code from data. As a result, they are at risk of a type of attack called a prompt injection, in which they are tricked into following commands they should not.", "type": "quote" } ], "glosses": [ "A method of causing an artificial intelligence to ignore its initial instructions (often ethical restrictions) by giving it a certain prompt." ], "id": "en-prompt_injection-en-noun-kMerLXVb", "links": [ [ "artificial intelligence", "artificial intelligence" ], [ "computer security", "computer security" ], [ "artificial intelligence", "artificial intelligence#Noun" ], [ "ethical", "ethical#Adjective" ], [ "prompt", "prompt#Noun" ] ], "qualifier": "artificial intelligence; computer security; artificial intelligence; computer security", "raw_glosses": [ "(artificial intelligence, computer security) A method of causing an artificial intelligence to ignore its initial instructions (often ethical restrictions) by giving it a certain prompt." 
], "related": [ { "word": "prompt engineering" }, { "word": "jailbreaking" } ], "tags": [ "countable", "uncountable" ] } ], "word": "prompt injection" }
{ "forms": [ { "form": "prompt injections", "tags": [ "plural" ] } ], "head_templates": [ { "args": { "1": "~" }, "expansion": "prompt injection (countable and uncountable, plural prompt injections)", "name": "en-noun" } ], "lang": "English", "lang_code": "en", "pos": "noun", "related": [ { "word": "prompt engineering" }, { "word": "jailbreaking" } ], "senses": [ { "categories": [ "English countable nouns", "English entries with incorrect language header", "English lemmas", "English multiword terms", "English nouns", "English terms with quotations", "English uncountable nouns", "Pages with 1 entry", "Pages with entries", "en:Artificial intelligence", "en:Computer security" ], "examples": [ { "bold_text_offsets": [ [ 299, 315 ] ], "ref": "2022 September 21, Alex Hern, “TechScape: AI's dark arts come into their own”, in The Guardian, London: Guardian News & Media, →ISSN, →OCLC, archived from the original on 05 Feb 2023:", "text": "Retomeli.io is a jobs board for remote workers, and the website runs a Twitter bot that spammed people who tweeted about remote working. The Twitter bot is explicitly labelled as being \"OpenAI-driven\", and within days of Goodside's proof-of-concept being published, thousands of users were throwing prompt injection attacks at the bot.", "type": "quote" }, { "bold_text_offsets": [ [ 127, 144 ] ], "ref": "2023 March 3, Chloe Xiang, “Hackers Can Turn Bing's AI Chatbot Into a Convincing Scammer, Researchers Say”, in VICE, archived from the original on 22 Mar 2023:", "text": "Yesterday, OpenAI announced an API for ChatGPT and posted an underlying format for the bot on GitHub, alluding to the issue of prompt injections.", "type": "quote" }, { "bold_text_offsets": [ [ 35, 51 ] ], "ref": "2023 February 14, Will Oremus, “Meet ChatGPT's evil twin, DAN”, in The Washington Post, Washington, D.C.: The Washington Post Company, →ISSN, →OCLC, archived from the original on 19 Mar 2023:", "text": "One category is what's known as a \"prompt injection attack,\" in which users trick the software into revealing its hidden data or instructions.", "type": "quote" }, { "bold_text_offsets": [ [ 211, 227 ] ], "ref": "2025 September 25, “How to stop AI’s “lethal trifecta””, in The Economist, →ISSN:", "text": "Large language models (LLMs), a trendy way of building artificial intelligence, have an inherent security problem: they cannot separate code from data. As a result, they are at risk of a type of attack called a prompt injection, in which they are tricked into following commands they should not.", "type": "quote" } ], "glosses": [ "A method of causing an artificial intelligence to ignore its initial instructions (often ethical restrictions) by giving it a certain prompt." ], "links": [ [ "artificial intelligence", "artificial intelligence" ], [ "computer security", "computer security" ], [ "artificial intelligence", "artificial intelligence#Noun" ], [ "ethical", "ethical#Adjective" ], [ "prompt", "prompt#Noun" ] ], "qualifier": "artificial intelligence; computer security; artificial intelligence; computer security", "raw_glosses": [ "(artificial intelligence, computer security) A method of causing an artificial intelligence to ignore its initial instructions (often ethical restrictions) by giving it a certain prompt." ], "tags": [ "countable", "uncountable" ] } ], "word": "prompt injection" }
Raw JSONL data for the prompt injection entry in the All languages combined dictionary is available for download (3.2 kB).
This page is part of the kaikki.org machine-readable All languages combined dictionary, based on structured data extracted on 2025-10-07 from the enwiktionary dump dated 2025-10-01 using wiktextract (19bd8d3 and 1ab82da). The data shown on this site has been post-processed: various details (e.g., extra categories) have been removed, some information has been disambiguated, and additional data has been merged from other sources. See the raw data download page for the unprocessed wiktextract data.
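The raw dumps are distributed as JSONL, one JSON object per line, so a full dump can be scanned without loading it all into memory. A minimal sketch of that workflow, assuming a local copy named kaikki-dump.jsonl (hypothetical filename) whose entries follow the same schema as the one above:

import json

# A minimal sketch for streaming a JSONL dump, assuming a local copy
# named "kaikki-dump.jsonl" (hypothetical filename), one entry per line.
with open("kaikki-dump.jsonl", encoding="utf-8") as f:
    for line in f:
        entry = json.loads(line)
        # Collect English nouns filed under the Computer security topic.
        if entry.get("lang_code") != "en" or entry.get("pos") != "noun":
            continue
        for sense in entry.get("senses", []):
            # Categories appear as objects here (as in the entry above);
            # other renderings may use plain strings instead.
            cats = {c.get("name") for c in sense.get("categories", [])
                    if isinstance(c, dict)}
            if "Computer security" in cats:
                print(entry["word"])
                break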
If you use this data in academic research, please cite Tatu Ylonen: Wiktextract: Wiktionary as Machine-Readable Structured Data, Proceedings of the 13th Conference on Language Resources and Evaluation (LREC), pp. 1317-1325, Marseille, 20-25 June 2022. Linking to the relevant page(s) under https://kaikki.org would also be greatly appreciated.