@@ -648,7 +648,7 @@ tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct t
         /* Process the various legal combinations of b"", r"", u"", and f"". */
         int saw_b = 0, saw_r = 0, saw_u = 0, saw_f = 0, saw_t = 0;
         while (1) {
-            if (!(saw_b || saw_u || saw_f || saw_t) && (c == 'b' || c == 'B'))
+            if (!(saw_b || saw_u || saw_f) && (c == 'b' || c == 'B'))
                 saw_b = 1;
             /* Since this is a backwards compatibility support literal we don't
                want to support it in arbitrary order like byte literals. */
@@ -660,17 +660,31 @@ tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct t
             else if (!(saw_r || saw_u) && (c == 'r' || c == 'R')) {
                 saw_r = 1;
             }
-            else if (!(saw_f || saw_b || saw_u || saw_t) && (c == 'f' || c == 'F')) {
+            else if (!(saw_f || saw_b || saw_u) && (c == 'f' || c == 'F')) {
                 saw_f = 1;
             }
-            else if (!(saw_t || saw_b || saw_u || saw_f) && (c == 't' || c == 'T')) {
+            else if (!(saw_t || saw_u) && (c == 't' || c == 'T')) {
                 saw_t = 1;
             }
             else {
                 break;
             }
             c = tok_nextc(tok);
             if (c == '"' || c == '\'') {
+                if (saw_b && saw_t) {
+                    return MAKE_TOKEN(_PyTokenizer_syntaxerror_known_range(
+                        tok, (int)(tok->start + 1 - tok->line_start),
+                        (int)(tok->cur - tok->line_start),
+                        "can't use 'b' and 't' string prefixes together"));
+                }
+                if (saw_f && saw_t) {
+                    return MAKE_TOKEN(_PyTokenizer_syntaxerror_known_range(
+                        tok, (int)(tok->start + 1 - tok->line_start),
+                        (int)(tok->cur - tok->line_start),
+                        "can't use 'f' and 't' string prefixes together"));
+                }
+
+                // Handle valid f or t string creation:
                 if (saw_f || saw_t) {
                     goto f_string_quote;
                 }
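
What this means for user code, as a minimal sketch: instead of the prefix loop silently stopping when it sees an incompatible letter, the 'b'+'t' and 'f'+'t' combinations are now parsed through to the opening quote and rejected there with the explicit error messages added above. The snippet below assumes a CPython build that includes this patch (and its 't' string prefix); on other builds the failure mode for these prefixes will differ.

# Sketch only: exercise the tokenizer on an interpreter built with this patch.
# The expected messages are the ones introduced in the diff above.
for src in ('bt""', 'tb""', 'ft""', 'tf""'):
    try:
        compile(src, "<test>", "eval")
        print(f"{src!r}: accepted")
    except SyntaxError as exc:
        print(f"{src!r}: {exc.msg}")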