id (string, length 1–4) | tokens (sequence) | ner_tags (sequence) |
---|---|---|
2400 | ["We", "come", "to", "a", "similar", "conclusion", "that", "multi-layer", "instancedependent", "prompt", "tuning", "model", "(M-IDPG)", "is", "significantly", "better", "than", "the", "single-layer", "method", "(SIDPG)", "in", "both", "evaluation", "settings."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0] |
2401 | ["To", "boost", "single-layer", "IDPG", "performance,", "we", "add", "supplementary", "training", "(cf.", "Appendix", "A.4)", "and", "conduct", "ablation", "studies", "in", "Appendix", "A.5."] | [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2402 | ["P-tuning", "v2", "(Liu", "et", "al.,", "2021a)", "conducted", "substantial", "ablation", "studies", "on", "the", "influence", "of", "inserting", "prompt", "into", "different", "transformer", "layers."] | [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2403 | ["Multi-layer?"] | [0] |
2404 | ["4.5.4", "Prompt", "Insertion:", "Single-layer", "or"] | [0, 0, 0, 0, 0] |
2405 | ["Detailed", "information", "for", "all", "models’", "performance", "on", "each", "task", "can", "be", "found", "in", "Appendix", "A.3."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2406 | ["In", "Table", "1,", "M-IDPG-PHM", "uses", "the", "previous", "layer’s", "output", "as", "input,", "M", "version", "as", "the", "generator,", "and", "16", "as", "the", "generator", "hidden", "size."] | [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 3, 4] |
2407 | ["As", "for", "the", "generator", "selection,", "the", "three", "models", "perform", "as", "expected", "(S", "version", "<", "M", "version", "<", "L", "version)."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2408 | ["However,", "as", "shown", "in", "Figure", "3,", "the", "experiment", "results", "suggest", "no", "significant", "difference", "between", "the", "two", "input", "ways."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2409 | ["In", "a", "multi-layer", "case,", "the", "input", "to", "each", "layer", "generator", "has", "another", "option,", "i.e.,", "the", "previous", "layer’s", "output."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2410 | ["in", "single-layer", "prompt", "generation", "model,", "the", "input", "to", "G", "is", "M(xi)", "-", "the", "representation", "of", "input", "sequence", "xi."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2411 | ["We", "hypothesize", "that", "the", "smaller", "hidden", "size", "of", "16", "is", "already", "enough", "to", "store", "useful", "instance", "information,", "and", "setting", "m", "too", "large", "may", "be", "less", "efficient."] | [0, 0, 0, 0, 0, 3, 4, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2412 | ["Surprisingly,", "we", "find", "that", "generator", "with", "a", "hidden", "size", "16", "is", "not", "far", "from", "the", "large", "model", "(92.0", "vs.", "92.1,", "respectively,", "in", "M", "version)."] | [0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 9, 0, 9, 0, 0, 0, 0] |
2413 | ["We", "compare", "two", "models", "with", "m", "=", "16", "and", "m", "=", "256."] | [0, 0, 0, 0, 0, 3, 0, 5, 0, 3, 0, 5] |
2414 | ["Another", "way", "to", "reduce", "the", "training", "parameters", "is", "by", "adjusting", "the", "hidden", "size", "m", "of", "the", "generator."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 3, 0, 0, 0] |
2415 | ["If", "each", "transformer", "layer", "requires", "an", "independent", "generator", "Gi,", "the", "number", "of", "training"] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2416 | ["When", "applying", "the", "instance-dependent", "generation", "model", "G", "into", "a", "multi-layer", "case,", "the", "first", "challenge", "we", "face", "is", "the", "considerable", "increase", "in", "training", "parameters."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2417 | ["4.5.3", "Multi-layer", "Architecture", "Exploration"] | [0, 0, 0, 0] |
2418 | ["On", "the", "other", "hand,", "this", "ablation", "study", "further", "verifies", "PHM", "layers’", "efficiency", "in", "the", "generation", "model."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2419 | ["We", "observe", "that", "including", "DNN", "as", "a", "generator", "doesn’t", "improve", "performance", "signif"] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2420 | ["Hence,", "we", "compare", "the", "PHM-based", "prompt", "generator", "with", "the", "DNN-based", "prompt", "generator,", "as", "shown", "in", "Table", "1."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2421 | ["An", "open", "question", "we", "seek", "to", "answer", "is", "what", "is", "the", "best", "generation", "model", "for", "prompt", "regardless", "of", "training", "parameters."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2422 | ["To", "reduce", "the", "tuning", "parameters,", "we", "substitute", "the", "DNN", "layers", "with", "PHM", "layers."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2423 | ["4.5.2", "Prompt", "Generator:", "PHM", "or", "DNN?"] | [0, 0, 0, 0, 0, 0] |
2424 | ["Adopting", "GloVe", "as", "sentence", "encoder", "would", "avoid", "going", "through", "the", "LM", "twice,", "thus", "effectively", "reducing", "IDPG’s", "run-time", "complexity", "by", "half."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0] |
2425 | ["One", "of", "the", "drawbacks", "of", "our", "method", "is", "that", "it", "is", "twice", "as", "expensive", "to", "run", "compared", "to", "Compacter,", "even", "though", "it", "uses", "slightly", "fewer", "parameters."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0] |
2426 | ["According", "to", "Table", "1,", "using", "GloVe", "as", "sentence", "encoder", "to", "generate", "prompts", "doesn’t", "sacrifice", "much", "performance", "over", "the", "ten", "tasks", "and", "outperforms", "prompt", "tuning", "and", "P-tuning", "v2."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 1, 2] |
2427 | ["Method"] | [0] |
2428 | ["For", "all", "other", "tasks,", "we", "report", "accuracy."] | [0, 0, 0, 0, 0, 0, 7] |
2429 | ["We", "report", "the", "average", "of", "accuracy", "and", "F1", "for", "both", "MRPC", "and", "QQP,", "and", "average", "of", "Pearson", "and", "Spearman", "correlation", "coefficients", "for", "STS-B."] | [0, 0, 0, 0, 0, 7, 0, 7, 0, 0, 13, 0, 13, 0, 0, 0, 7, 8, 8, 8, 8, 0, 13] |
2430 | ["Underline", "marks", "the", "best", "result", "among", "all", "prompt", "tuning", "methods."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2431 | ["Bold", "marks", "the", "best", "result", "among", "all", "competing", "methods."] | [0, 0, 0, 0, 0, 0, 0, 0, 0] |
2432 | ["We", "report", "average", "results", "across", "5", "runs", "with", "different", "initialization."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2433 | ["Specifically,", "we", "take", "the", "average", "of", "word", "vectors", "as", "the", "sentence", "embeddings:"] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2434 | ["To", "answer", "this", "question,", "we", "apply", "the", "pre-trained", "GloVe", "word", "vectors2", "to", "extract", "the", "sentence", "representation."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2435 | ["One", "open", "question", "is", "to", "explore", "reliability", "on", "lightweight", "sentence", "representations", "such", "as", "GloVe", "embedding", "(Pennington", "et", "al.,", "2014)", "or", "token", "embedding", "of", "pre-trained", "language", "models."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2436 | ["Obtaining", "contextualized", "transformer", "sentence", "embedding", "is", "often", "expensive", "if", "it", "is", "not", "pre-computed."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2437 | ["The", "proposed", "IDPG", "method", "relies", "on", "pre-trained", "LM", "to", "extract", "sentence", "representation,", "i.e.,", "[CLS]", "token", "embedding."] | [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2438 | ["4.5.1", "Sentence", "Encoder:", "GloVe", "or", "LMs?"] | [0, 0, 0, 0, 0, 0] |
2439 | ["We", "conduct", "several", "ablation", "studies", "including", "exploration", "of", "different", "generator", "architectures", "and", "impact", "of", "selecting", "different", "prompt", "positions."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2440 | ["4.5", "Intrinsic", "Study"] | [0, 0, 0] |
2441 | ["Testing", "the", "limitation", "of", "our", "model", "without", "freezing", "any", "parameters", "would", "be", "an", "interesting", "investigation,", "but", "is", "not", "the", "main", "focus", "of", "this", "paper."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2442 | ["We", "want", "to", "highlight", "that", "we", "are", "exploring", "a", "solution", "by", "training", "as", "few", "parameters", "as", "possible", "while", "maintaining", "good", "performance."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2443 | ["We", "also", "note", "that", "other", "state-of-the-art", "models,", "such", "as", "LM-BFF", "(Gao", "et", "al.,", "2021a),", "attempt", "to", "address", "the", "few-shot", "learning", "problem", "from", "a", "different", "perspective."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 0, 0, 0, 0, 0] |
2444 | ["We", "suspect", "that", "this", "may", "be", "due", "to", "poor", "initialization", "leading", "the", "model", "to", "non-optimal", "parameters."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2445 | ["We", "also", "observe", "that", "sometimes", "when", "K", "is", "small,", "our", "method", "results", "have", "high", "variance", "(e.g.,", "4.6", "on", "MPQA,", "when", "K", "=", "100)."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0] |
2446 | ["When", "K", "becomes", "larger,", "IDPG-PHM", "still", "maintains", "good", "results", "with", "1.9pt", "and", "0.2pt", "improvement", "(K=500);", "and", "2.0pt", "and", "0.2pt", "improvement", "(K=1000)", "in", "accuracy", "with", "traditional", "prompt", "tuning", "and", "P-tuning", "v2", "approaches,", "respectively."] | [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 9, 0, 9, 0, 0, 0, 9, 0, 9, 0, 0, 0, 7, 0, 1, 2, 2, 0, 1, 2, 0, 0] |
2447 | ["This", "improvement", "illustrates", "that", "our", "method", "has", "better", "generalization", "in", "few-shot", "settings."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2448 | ["In", "the", "extreme", "low-resource", "case", "when", "K=100,", "M-IDPG-PHM", "performs", "2.5pt", "better", "than", "the", "traditional", "prompt", "tuning", "method", "and", "0.5pt", "better", "than", "the", "multi-layer", "P-Tuning", "v2", "method."] | [0, 0, 0, 0, 0, 0, 0, 1, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, 1, 2, 0] |
2449 | ["Following", "the", "existing", "evaluation", "protocols", "in", "the", "few-shot", "setting", "(He", "et", "al.,", "2021),", "we", "sample", "a", "subset", "of", "the", "training", "data", "for", "each", "task", "with", "size", "K", "as", "our", "∈", "{", "training", "data", "and", "another", "subset", "with", "size", "1000", "as", "a", "development", "set."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0] |
2450 | ["4.4", "Performance", "in", "low-resource", "scenario"] | [0, 0, 0, 0, 0] |
2451 | ["#", "Parameters"] | [0, 0] |
2452 | ["Method"] | [0] |
2453 | ["Our", "proposed", "method,", "especially", "the", "M-IDPGPHM,", "falls", "in", "the", "gap", "between", "prompt-tuning", "and", "adapter", "model,", "since", "it", "only", "requires", "training", "134K", "parameters", "and", "performs", "on", "par", "with", "Compacter."] | [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1] |
2454 | ["However,", "its", "performance", "is", "worse", "than", "a", "lightweight", "adapter", "model", "(e.g.,", "Compacter", "with", "149K", "parameters)."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0] |
2455 | ["Traditional", "prompt-tuning", "method", "only", "requires", "training", "a", "token", "embedding", "table", "with", "a", "few", "thousand", "parameters."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2456 | ["The", "general", "goal", "for", "efficient", "transfer", "learning", "is", "to", "train", "models", "with", "fewer", "parameters", "while", "achieving", "better", "performance."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2457 | ["Table", "2", "lists", "the", "number", "of", "trainable", "parameters", "for", "different", "methods", "excluding", "the", "classification", "head."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2458 | ["4.3", "Efficiency"] | [0, 0] |
2459 | ["(vi)", "When", "we", "fix", "the", "training", "parameters", "to", "be", "the", "same,", "the", "comparison", "between", "Prompttuning-134", "and", "M-IDPG-PHM", "illustrates", "that", "our", "approach", "works", "better", "than", "prompt", "tuning", "not", "just", "because", "of", "using", "more", "parameters."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2460 | ["(v)", "GloVe-based", "sentence", "encoder", "also", "performs", "similar", "to", "LMbased", "sentence", "encoder,", "indicating", "the", "advancement", "of", "instance-dependent", "prompt", "generation", "does", "not", "rely", "on", "a", "robust", "contextual", "sentence", "encoder."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2461 | ["based", "generator", "performs", "on", "par", "with", "the", "DNNbased", "generator", "while", "having", "a", "significantly", "lower", "number", "of", "trainable", "parameters."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2462 | ["Specifically,", "M-IDPG-PHM", "performs", "0.84pt", "and", "0.36pt", "better", "than", "RoBERTa", "and", "EFL,", "respectively."] | [0, 1, 0, 9, 0, 9, 0, 0, 1, 0, 1, 0] |
2463 | ["The", "four", "best", "results", "(MPQA,", "Subj,", "CR,", "MR)", "among", "all", "competing", "methods", "in", "single-sentence", "classification", "tasks", "are", "made", "by", "IDPG", "models."] | [0, 0, 0, 0, 13, 13, 13, 13, 0, 0, 0, 0, 0, 11, 12, 0, 0, 0, 0, 1, 0] |
2464 | ["The", "improvement", "of", "our", "method", "is", "more", "prominent", "in", "the", "single-sentence", "classification", "task."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 0] |
2465 | ["M-IDPG-PHM", "is", "better", "than", "Compacter", "on", "four", "tasks", "and", "has", "the", "same", "performance", "on", "three", "tasks."] | [1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2466 | ["Note", "that", "IDPG", "uses", "15K", "fewer", "parameters", "than", "the", "Compacter."] | [0, 0, 1, 0, 0, 0, 0, 0, 0, 1] |
2467 | ["However,", "the", "gap", "is", "mostly", "from", "RTE", "and", "QQP."] | [0, 0, 0, 0, 0, 0, 13, 0, 13] |
2468 | ["(ii)", "Compared", "with", "other", "efficient", "transfer", "learning", "methods,", "IDPG", "performs", "slightly", "worse", "than", "the", "Compacter", "(Mahabadi", "et", "al.,", "2021)", "and", "Adapter", "(Houlsby", "et", "al.,", "2019),", "across", "the", "ten", "tasks."] | [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0] |
2469 | ["We", "observe", "that:", "(i)", "Our", "proposed", "method", "M-IDPG-PHM", "consistently", "outperforms", "the", "prompt", "tuning", "method", "and", "Ptuning", "v2", "by", "average", "3.1pt", "and", "1.6pt,", "respectively", "(except", "on", "the", "RTE", "dataset)."] | [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 9, 0, 9, 0, 0, 0, 0, 13, 0] |
2470 | ["Table", "1", "shows", "the", "results", "of", "all", "the", "methods", "on", "full", "datasets", "across", "10", "NLU", "tasks."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0] |
2471 | ["4.2", "Performance", "in", "high-resource", "scenario"] | [0, 0, 0, 0, 0] |
2472 | ["Notably,", "Prompt-tuning134", "uses", "134", "prompt", "lengths", "in", "Table", "1,", "and", "it", "is", "set", "so", "to", "match", "the", "training", "parameters", "of", "the", "proposed", "method,", "M-IDPG-PHM."] | [0, 1, 0, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1] |
2473 | ["Additional", "training", "details", "can", "be", "found", "in", "Appendix", "A.1."] | [0, 0, 0, 0, 0, 0, 0, 0, 0] |
2474 | ["For", "a", "fair", "comparison,", "all", "the", "pre-trained", "LMs", "are", "24-layer", "16-head", "RoBERTa-Large", "models", "(Liu", "et", "al.,", "2019)."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0] |
2475 | ["Again,", "the", "difference", "between", "the", "first", "two", "is", "in", "the", "prompt", "generator,", "while", "M-IDPG-PHMGloVe", "uses", "GloVe", "to", "encode", "input", "sequences."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0] |
2476 | ["We", "also", "explore", "three", "versions", "of", "multilayer", "instance-dependent", "generation", "methods:", "M-IDPG-DNN,", "M-IDPG-PHM,", "M-IDPG-PHM"] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1] |
2477 | ["The", "second", "one", "uses", "the", "PHM", "layer", "and", "only", "contains", "105K", "parameters."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2478 | ["The", "first", "version", "is", "based", "on", "a", "2-layer", "perceptron", "generator,", "which", "contains", "1.5M", "parameters."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2479 | ["We", "compare", "these", "against", "two", "versions", "of", "singlelayer", "instance-dependent", "generation", "methods:", "SIDPG-DNN", "and", "S-IDPG-PHM."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1] |
2480 | ["Adapter-based", "fine-tuning:", "This", "efficient", "transfer", "learning", "method", "inserts", "an", "adaptation", "module", "inside", "each", "transformer", "layer", "including", "Compactor", "(Mahabadi", "et", "al.,", "2021)", "and", "Adapter", "(Houlsby", "et", "al.,", "2019)."] | [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0] |
2481 | ["Prompt", "tuning:", "We", "implemented", "two", "versions", "–", "standard", "prompt", "tuning", "(Lester", "et", "al.,", "2021)", "and", "multi-layer", "prompt", "tuning", "(Li", "and", "Liang,", "2021;", "Liu", "et", "al.,", "2021a)."] | [1, 2, 0, 0, 0, 0, 0, 1, 2, 2, 0, 0, 0, 0, 0, 1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0] |
2482 | ["Transformer", "fine-tuning:", "We", "instantiated", "two", "versions", "–", "a", "vanilla", "transformer", "fine-tuning", "(Liu", "et", "al.,", "2019)", "and", "the", "entailment-based", "finetuning", "(Wang", "et", "al.,", "2021)."] | [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2483 | ["We", "compare", "our", "proposed", "method", "with", "a", "wide", "range", "of", "methods,", "as", "follows:"] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2484 | ["SST-2,", "QNLI,", "RTE,", "MRPC,", "STS-B", "(Cer", "et", "al.,", "2017)", "and", "QQP."] | [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1] |
2485 | ["We", "evaluate", "on", "ten", "standard", "natural", "language", "understanding", "(NLU)", "datasets", "–", "MPQA", "(Wiebe", "et", "al.,", "2005),", "Subj", "(Pang", "and", "Lee,", "2004),", "CR", "(Hu", "and", "Liu,", "2004),", "MR", "(Pang", "and", "Lee,", "2005),", "and", "six", "tasks", "from", "GLUE", "(Wang", "et", "al.,", "2019),", "viz."] | [0, 0, 0, 0, 0, 11, 12, 12, 11, 0, 0, 13, 0, 0, 0, 0, 13, 0, 0, 0, 0, 13, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0] |
2486 | ["4.1", "Experimental", "Setup"] | [0, 0, 0] |
2487 | ["4", "Experiment", "Results"] | [0, 0, 0] |
2488 | ["Method"] | [0] |
2489 | ["For", "all", "the", "other", "tasks,", "we", "report", "accuracy."] | [0, 0, 0, 0, 0, 0, 0, 7] |
2490 | ["We", "report", "the", "average", "of", "accuracy", "and", "F1", "for", "both", "MRPC", "and", "QQP,", "and", "average", "of", "Pearson", "and", "Spearman", "correlation", "coefficients", "for", "STS-B."] | [0, 0, 0, 0, 0, 7, 0, 7, 0, 0, 13, 0, 13, 0, 0, 0, 7, 8, 8, 8, 8, 0, 13] |
2491 | ["Underline", "marks", "the", "best", "result", "among", "all", "prompt", "tuning", "methods."] | [0, 0, 0, 0, 0, 0, 0, 11, 12, 0] |
2492 | ["Bold", "marks", "the", "best", "result", "among", "all", "competing", "methods."] | [0, 0, 0, 0, 0, 0, 0, 0, 0] |
2493 | ["We", "report", "average", "results", "across", "5", "runs", "with", "different", "initialization."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2494 | ["Each", "methods", "are", "evaluated", "on", "full", "test", "sets", "(dev", "sets", "for", "GLUE", "tasks)."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0] |
2495 | ["In", "short,", "assuming", "each", "layer", "generator", "Gi", "has", "form", "y", "=", "Wx", "+", "bi,", "we", "share", "the", "weight", "matrix", "Rm", "W", "across", "generators", "and", "set", "the", "bias", "term", "bi", "∈", "to", "be", "layer-specific,", "where", "i", "=", "1,", ".", ".", "."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2496 | ["However,", "simply", "generalizing", "our", "model", "(IDPG)", "to", "a", "multi-layer", "version", "(M-IDPG),", "will", "significantly", "increase", "the", "number", "of", "training", "parameters,", "since", "each", "layer", "requires", "an", "independent", "generator", "G.", "Instead,", "we", "explore", "different", "architectures", "in", "Section", "4.5.3", "to", "balance", "the", "number", "of", "tuned", "parameters", "against", "model", "performance."] | [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2497 | ["Following", "Prefix", "tuning", "(Li", "and", "Liang,", "2021)", "and", "P-tuning", "v2", "(Liu", "et", "al.,", "2021a),", "we", "prepend", "our", "generated", "prompts", "at", "each", "transformer", "layer", "to", "address", "the", "above", "issues."] | [0, 1, 2, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2498 | ["(ii)", "Generalizing", "to", "long", "sequence", "tasks:", "it", "is", "unclear", "that", "prompt", "tuning", "can", "perform", "well", "in", "tasks", "with", "long", "input", "when", "only", "a", "limited", "number", "of", "parameters", "can", "be", "inserted", "in", "single", "layer."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
2499 | ["While", "proven", "efficient", "in", "some", "specific", "settings,", "single", "layer", "prompt", "tuning", "has", "two", "main", "limitations:", "(i)", "Capturing", "deep", "contextual", "information:", "the", "impact", "of", "the", "first-layer", "prompts", "on", "final", "prediction", "is", "low", "when", "transformer", "goes", "deeper."] | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
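
The rows above are raw token/tag pairs, and nothing in the preview names the integer label classes, so downstream code has to treat them as opaque ids. Below is a minimal sketch, assuming the Hugging Face `datasets` library, that rebuilds one preview row (id 2413) as an in-memory dataset, checks the token/tag alignment invariant that every row above satisfies, and groups consecutive non-zero ids into candidate entity spans; the run-grouping helper `tag_runs` is an illustrative heuristic, not part of the dataset itself.

```python
from datasets import Dataset

# One row copied verbatim from the preview (id 2413). The integer ids in
# ner_tags are opaque here: the preview does not publish the label names.
row = {
    "id": ["2413"],
    "tokens": [["We", "compare", "two", "models", "with",
                "m", "=", "16", "and", "m", "=", "256."]],
    "ner_tags": [[0, 0, 0, 0, 0, 3, 0, 5, 0, 3, 0, 5]],
}
ds = Dataset.from_dict(row)

# Invariant visible in every row of the preview: tokens and ner_tags are
# parallel sequences of equal length.
for ex in ds:
    assert len(ex["tokens"]) == len(ex["ner_tags"])

def tag_runs(tokens, tags):
    """Group consecutive non-zero tag ids into candidate entity spans.

    Naive heuristic: touching entities are merged into one run, since the
    B-/I- semantics of the ids are not documented in the preview.
    """
    runs, current = [], []
    for token, tag in zip(tokens, tags):
        if tag != 0:
            current.append((token, tag))
        elif current:
            runs.append(current)
            current = []
    if current:
        runs.append(current)
    return runs

print(tag_runs(ds[0]["tokens"], ds[0]["ner_tags"]))
# [[('m', 3)], [('16', 5)], [('m', 3)], [('256.', 5)]]
```

If the full dataset ships `ClassLabel` features, `ds.features["ner_tags"].feature.names` would map the ids back to strings. The pairings visible above (1/2 on method names in rows 2481 and 2497, 3/4 on hyperparameter phrases in row 2414, 7/8 on metric phrases in row 2429, 11/12 on task names in row 2485) suggest an odd-id-begins / even-id-continues convention, but that is an inference from this preview, not a documented scheme.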