See the entry for "few-shot" in All languages combined, or on Wiktionary.
{ "head_templates": [ { "args": { "1": "-" }, "expansion": "few-shot (not comparable)", "name": "en-adj" } ], "lang": "English", "lang_code": "en", "pos": "adj", "senses": [ { "categories": [ { "kind": "other", "name": "English entries with incorrect language header", "parents": [ "Entries with incorrect language header", "Entry maintenance" ], "source": "w" }, { "kind": "other", "name": "Pages with 1 entry", "parents": [], "source": "w" }, { "kind": "other", "name": "Pages with entries", "parents": [], "source": "w" }, { "kind": "topical", "langcode": "en", "name": "Machine learning", "orig": "en:Machine learning", "parents": [ "Artificial intelligence", "Computer science", "Cybernetics", "Computing", "Sciences", "Applied mathematics", "Systems theory", "Technology", "All topics", "Mathematics", "Systems", "Fundamental", "Formal sciences", "Interdisciplinary fields", "Society" ], "source": "w" } ], "examples": [ { "ref": "2023 October 30, Herbold et al., “A large-scale comparison of human-written versus ChatGPT-generated essays”, in Scientific Reports, volume 13, page 4:", "text": "No additional prompts for getting the responses were used, i.e. the data was created with a basic prompt in a zero-shot scenario. This is in contrast to the benchmarks by OpenAI, who used an engineered prompt in a few-shot scenario to guide the generation of essays.", "type": "quote" } ], "glosses": [ "A machine learning paradigm where a model is trained on a very small amount of data, typically less than that required for traditional machine learning approaches." 
], "id": "en-few-shot-en-adj-T-8OHTZA", "links": [ [ "machine learning", "machine learning" ], [ "paradigm", "paradigm" ], [ "model", "model" ], [ "data", "data" ], [ "approach", "approach" ] ], "qualifier": "machine learning", "raw_glosses": [ "(machine learning, attributive) A machine learning paradigm where a model is trained on a very small amount of data, typically less than that required for traditional machine learning approaches." ], "tags": [ "attributive", "not-comparable" ] } ], "word": "few-shot" }
{ "head_templates": [ { "args": { "1": "-" }, "expansion": "few-shot (not comparable)", "name": "en-adj" } ], "lang": "English", "lang_code": "en", "pos": "adj", "senses": [ { "categories": [ "English adjectives", "English entries with incorrect language header", "English lemmas", "English multiword terms", "English terms with quotations", "English uncomparable adjectives", "Pages with 1 entry", "Pages with entries", "en:Machine learning" ], "examples": [ { "ref": "2023 October 30, Herbold et al., “A large-scale comparison of human-written versus ChatGPT-generated essays”, in Scientific Reports, volume 13, page 4:", "text": "No additional prompts for getting the responses were used, i.e. the data was created with a basic prompt in a zero-shot scenario. This is in contrast to the benchmarks by OpenAI, who used an engineered prompt in a few-shot scenario to guide the generation of essays.", "type": "quote" } ], "glosses": [ "A machine learning paradigm where a model is trained on a very small amount of data, typically less than that required for traditional machine learning approaches." ], "links": [ [ "machine learning", "machine learning" ], [ "paradigm", "paradigm" ], [ "model", "model" ], [ "data", "data" ], [ "approach", "approach" ] ], "qualifier": "machine learning", "raw_glosses": [ "(machine learning, attributive) A machine learning paradigm where a model is trained on a very small amount of data, typically less than that required for traditional machine learning approaches." ], "tags": [ "attributive", "not-comparable" ] } ], "word": "few-shot" }
Download raw JSONL data for few-shot meaning in English (1.5kB)
This page is a part of the kaikki.org machine-readable English dictionary. This dictionary is based on structured data extracted on 2024-12-15 from the enwiktionary dump dated 2024-12-04 using wiktextract (8a39820 and 4401a4c). The data shown on this site has been post-processed: various details (e.g., extra categories) have been removed, some information has been disambiguated, and additional data has been merged from other sources. See the raw data download page for the unprocessed wiktextract data.
If you use this data in academic research, please cite Tatu Ylonen: Wiktextract: Wiktionary as Machine-Readable Structured Data, Proceedings of the 13th Conference on Language Resources and Evaluation (LREC), pp. 1317-1325, Marseille, 20-25 June 2022. Linking to the relevant page(s) under https://kaikki.org would also be greatly appreciated.