<?xml version="1.0" encoding="UTF-8"?><rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dcq="http://purl.org/dc/terms/"><records count="1" morepages="false" start="1" end="1"><record rownumber="1"><dc:product_type>Conference Paper</dc:product_type><dc:title>Use of Large Language Models for Extracting Knowledge Components in CS1 Programming Exercises</dc:title><dc:creator>Niousha, Rose; Hoq, Muntasir; Akram, Bita; Norouzi, Narges</dc:creator><dc:corporate_author/><dc:editor/><dc:description>programming concepts in programming assignments in a CS1
course. We seek to answer the following research questions: RQ1.
How effectively can large language models identify knowledge
components in a CS1 course from programming assignments? RQ2.
Can large language models be used to extract program-level knowledge
components, and how can the information be used to identify
students’ misconceptions? Preliminary results demonstrated a high
similarity between course-level knowledge components retrieved
from a large language model and those of an expert-generated list.</dc:description><dc:publisher>ACM</dc:publisher><dc:date>2024-03-14</dc:date><dc:nsf_par_id>10501958</dc:nsf_par_id><dc:journal_name>Special Interest Group on Computer Science Education bulletin</dc:journal_name><dc:journal_volume/><dc:journal_issue/><dc:page_range_or_elocation>1762 to 1763</dc:page_range_or_elocation><dc:issn/><dc:isbn>9798400704246</dc:isbn><dc:doi>https://doi.org/10.1145/3626253.3635592</dc:doi><dcq:identifierAwardId>2236195; 2331965</dcq:identifierAwardId><dc:subject/><dc:version_number/><dc:location>Portland OR USA</dc:location><dc:rights/><dc:institution/><dc:sponsoring_org>National Science Foundation</dc:sponsoring_org></record></records></rdf:RDF>