{
    "author": null,
    "date_published": "2022-04-05T20:22:00.000Z",
    "dek": null,
    "direction": "ltr",
    "domain": "arxiv.org",
    "excerpt": "Large language models have been shown to achieve remarkable performance across a variety of natural language tasks using few-shot learning, which drastically reduces the number of task-specific…",
    "lead_image_url": "https://static.arxiv.org/icons/twitter/arxiv-logo-twitter-square.png",
    "next_page_url": null,
    "rendered_pages": 1,
    "title": "PaLM: Scaling Language Modeling with Pathways",
    "total_pages": 1,
    "url": "https://arxiv.org/abs/2204.02311v3",
    "word_count": 261
}