@@ -113,7 +127,7 @@ export const LandingBanner = ({ title, label, to, button, small, background, col
 export const LandingBannerButton = ({ to, small, children }) => (
     <div className={classes.bannerButton}>
-        <Button to={to} variant="tertiary" large={!small}>
+        <Button to={to} variant="tertiary" large={!small} className={classes.bannerButtonElement}>
             {children}
         </Button>
     </div>
diff --git a/website/src/images/spacy-irl.jpg b/website/src/images/spacy-irl.jpg
new file mode 100644
index 000000000..ee8f4bdc9
Binary files /dev/null and b/website/src/images/spacy-irl.jpg differ
diff --git a/website/src/styles/landing.module.sass b/website/src/styles/landing.module.sass
index efe3d3e5a..d7340229b 100644
--- a/website/src/styles/landing.module.sass
+++ b/website/src/styles/landing.module.sass
@@ -73,6 +73,7 @@
     color: var(--color-back)
     padding: 5rem
     margin-bottom: var(--spacing-md)
+    background-size: cover

 .banner-content
     margin-bottom: 0
@@ -100,7 +101,7 @@
 .banner-text-small p
     font-size: 1.35rem
-    margin-bottom: 1rem
+    margin-bottom: 1.5rem

 @include breakpoint(min, md)
     .banner-content
@@ -134,6 +135,9 @@
     margin-bottom: var(--spacing-sm)
     text-align: right

+.banner-button-element
+    background: var(--color-theme)
+
 .logos
     text-align: center
     padding-bottom: 1rem
diff --git a/website/src/widgets/landing.js b/website/src/widgets/landing.js
index 6905d46d0..9e6e95c2d 100644
--- a/website/src/widgets/landing.js
+++ b/website/src/widgets/landing.js
@@ -19,6 +19,7 @@ import { H2 } from '../components/typography'
 import { Ul, Li } from '../components/list'
 import Button from '../components/button'
 import Link from '../components/link'
+import irlBackground from '../images/spacy-irl.jpg'

 import BenchmarksChoi from 'usage/_benchmarks-choi.md'
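To show how the new import is presumably consumed: the `LandingBanner` signature in the first hunk already accepts a `background` prop, and the stylesheet change adds `background-size: cover`, so the photo is most likely passed straight to the banner. A hedged sketch follows; the prop values are placeholders, not taken from this diff.

```jsx
// Hypothetical usage sketch, not part of this diff. The prop names
// (title, label, to, button, background, small) come from the
// LandingBanner signature in the first hunk; the values are placeholders.
<LandingBanner
    title="spaCy IRL 2019"
    label="Join us in Berlin"
    to="#" // placeholder link
    button="Learn more"
    background={irlBackground} // scaled by the new background-size: cover rule
    small
>
    {/* banner copy from the landing.js hunk below */}
</LandingBanner>
```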
@@ -151,19 +152,21 @@ const Landing = ({ data }) => {
- Learn more from small training corpora by initializing your models with{' '}
- knowledge from raw text. The new pretrain command teaches
- spaCy's CNN model to predict words based on their context, producing
- representations of words in contexts. If you've seen Google's BERT system or
- fast.ai's ULMFiT, spaCy's pretraining is similar – but much more efficient. It's
- still experimental, but users are already reporting good results, so give it a
- try!
+ We're pleased to invite the spaCy community and other folks working on Natural
+ Language Processing to Berlin this summer for a small and intimate event{' '}
+ July 5-6, 2019. The event includes a hands-on training day for
+ teams using spaCy in production, followed by a one-track conference. We booked a
+ beautiful venue, hand-picked an awesome lineup of speakers and scheduled plenty
+ of social time to get to know each other and exchange ideas.
{
- spaCy v2.0 features new neural models for tagging,{' '}
- parsing and entity recognition. The models have
- been designed and implemented from scratch specifically for spaCy, to give you an
- unmatched balance of speed, size and accuracy. A novel bloom embedding strategy with
- subword features is used to support huge vocabularies in tiny tables. Convolutional
- layers with residual connections, layer normalization and maxout non-linearity are
- used, giving much better efficiency than the standard BiLSTM solution. Finally, the
- parser and NER use an imitation learning objective to deliver accuracy in-line with
- the latest research systems, even when evaluated from raw text. With these
- innovations, spaCy v2.0's models are 10× smaller,{' '}
- 20% more accurate, and
- even cheaper to run than the previous generation.
+ Learn more from small training corpora by initializing your models with{' '}
+ knowledge from raw text. The new pretrain command teaches spaCy's
+ CNN model to predict words based on their context, producing representations of
+ words in contexts. If you've seen Google's BERT system or fast.ai's ULMFiT, spaCy's
+ pretraining is similar – but much more efficient. It's still experimental, but users
+ are already reporting good results, so give it a try!
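Since the copy above centers on the `pretrain` command, here is a minimal sketch of the CLI workflow as it worked in spaCy v2.1. The file names and paths are placeholders, and the exact flags should be checked against `python -m spacy pretrain --help`.

```bash
# Pretrain the CNN (tok2vec) layer on raw text: a JSONL file with one
# {"text": "..."} entry per line, plus a vectors model that defines the
# prediction targets. All paths here are placeholders.
python -m spacy pretrain raw_text.jsonl en_vectors_web_lg ./pretrain_out

# Initialize regular training with the pretrained weights.
python -m spacy train en ./model_out train.json dev.json \
    --init-tok2vec ./pretrain_out/model999.bin
```

The `--init-tok2vec` flag is how `spacy train` consumed pretrained weights in v2.x; later releases moved this into the config-based training workflow.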